git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
author: John W. Linville <linville@tuxdriver.com>
Thu, 5 Jan 2012 15:12:45 +0000 (10:12 -0500)
committer: John W. Linville <linville@tuxdriver.com>
Thu, 5 Jan 2012 15:13:24 +0000 (10:13 -0500)
Conflicts:
drivers/net/wireless/b43legacy/dma.c

2049 files changed:
CREDITS
Documentation/ABI/testing/sysfs-block
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/DocBook/uio-howto.tmpl
Documentation/blockdev/cciss.txt
Documentation/cgroups/memory.txt
Documentation/cgroups/net_prio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/calxeda-xgmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/cc770.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/filesystems/btrfs.txt
Documentation/i2c/ten-bit-addresses
Documentation/kernel-parameters.txt
Documentation/networking/00-INDEX
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/networking/ieee802154.txt
Documentation/networking/ifenslave.c
Documentation/networking/ip-sysctl.txt
Documentation/networking/openvswitch.txt [new file with mode: 0644]
Documentation/networking/packet_mmap.txt
Documentation/networking/scaling.txt
Documentation/networking/stmmac.txt
Documentation/networking/team.txt [new file with mode: 0644]
Documentation/power/devices.txt
Documentation/power/runtime_pm.txt
Documentation/serial/serial-rs485.txt
Documentation/sound/alsa/HD-Audio.txt
Documentation/sound/alsa/soc/machine.txt
Documentation/usb/linux-cdc-acm.inf
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/common/gic.c
arch/arm/common/pl330.c
arch/arm/configs/at91cap9_defconfig [moved from arch/arm/configs/at91cap9adk_defconfig with 93% similarity]
arch/arm/configs/at91rm9200_defconfig
arch/arm/configs/at91sam9260_defconfig [moved from arch/arm/configs/at91sam9260ek_defconfig with 86% similarity]
arch/arm/configs/at91sam9g20_defconfig [moved from arch/arm/configs/at91sam9g20ek_defconfig with 90% similarity]
arch/arm/configs/at91sam9g45_defconfig
arch/arm/configs/at91sam9rl_defconfig [moved from arch/arm/configs/at91sam9rlek_defconfig with 94% similarity]
arch/arm/configs/ezx_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/magician_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/u300_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/pmu.h
arch/arm/include/asm/topology.h
arch/arm/include/asm/unistd.h
arch/arm/include/asm/unwind.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-armv.S
arch/arm/kernel/head.S
arch/arm/kernel/kprobes-arm.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-test-thumb.c
arch/arm/kernel/kprobes-test.h
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/pmu.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/kernel/topology.c
arch/arm/kernel/unwind.c
arch/arm/lib/bitops.h
arch/arm/lib/changebit.S
arch/arm/lib/clearbit.S
arch/arm/lib/setbit.S
arch/arm/lib/testchangebit.S
arch/arm/lib/testclearbit.S
arch/arm/lib/testsetbit.S
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-bcmring/core.c
arch/arm/mach-bcmring/dma.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/psc.c
arch/arm/mach-exynos/cpu.c
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-exynos/mct.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clock-imx35.c
arch/arm/mach-imx/clock-imx6q.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/src.c
arch/arm/mach-mmp/gplugd.c
arch/arm/mach-mmp/include/mach/gpio-pxa.h
arch/arm/mach-msm/devices-iommu.c
arch/arm/mach-mx5/board-mx51_babbage.c
arch/arm/mach-mx5/board-mx53_evk.c
arch/arm/mach-mx5/board-mx53_loco.c
arch/arm/mach-mx5/board-mx53_smd.c
arch/arm/mach-mx5/cpu.c
arch/arm/mach-mx5/imx51-dt.c
arch/arm/mach-mx5/imx53-dt.c
arch/arm/mach-mx5/mm.c
arch/arm/mach-mxs/clock-mx28.c
arch/arm/mach-mxs/include/mach/mx28.h
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-mxs/mach-m28evk.c
arch/arm/mach-mxs/mach-stmp378x_devb.c
arch/arm/mach-mxs/module-tx28.c
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/clock.h
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap1/devices.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/display.h [new file with mode: 0644]
arch/arm/mach-omap2/io.h [deleted file]
arch/arm/mach-omap2/mcbsp.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_2420_data.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.h
arch/arm/mach-omap2/omap_l3_noc.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/twl-common.h
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/colibri-pxa320.c
arch/arm/mach-pxa/gumstix.c
arch/arm/mach-pxa/include/mach/palm27x.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/palmtc.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/s3c6400.c
arch/arm/mach-s3c64xx/setup-fb-24bpp.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-sa1100/Makefile.boot
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-kota2.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/pm-sh7372.c
arch/arm/mach-w90x900/dev.c
arch/arm/mach-w90x900/include/mach/mfp.h
arch/arm/mach-w90x900/include/mach/nuc900_spi.h
arch/arm/mach-w90x900/mfp.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmap.c
arch/arm/mm/proc-v7.S
arch/arm/oprofile/common.c
arch/arm/plat-mxc/cpufreq.c
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/mxc.h
arch/arm/plat-mxc/include/mach/system.h
arch/arm/plat-mxc/include/mach/uncompress.h
arch/arm/plat-mxc/pwm.c
arch/arm/plat-mxc/system.c
arch/arm/plat-omap/include/plat/clock.h
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-orion/gpio.c
arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
arch/arm/plat-s5p/sysmmu.c
arch/arm/plat-samsung/dev-backlight.c
arch/arm/plat-samsung/include/plat/cpu-freq-core.h
arch/arm/plat-samsung/include/plat/gpio-cfg.h
arch/arm/plat-samsung/pd.c
arch/arm/plat-samsung/pwm.c
arch/arm/tools/mach-types
arch/cris/arch-v10/drivers/Kconfig
arch/cris/arch-v32/drivers/Kconfig
arch/ia64/include/asm/cputime.h
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/microblaze/include/asm/namei.h [deleted file]
arch/mips/cavium-octeon/flash_setup.c
arch/mips/cavium-octeon/smp.c
arch/mips/emma/common/prom.c
arch/mips/include/asm/ip32/mace.h
arch/mips/include/asm/mach-bcm47xx/gpio.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cpufreq/loongson2_clock.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/traps.c
arch/mips/lantiq/clk.c
arch/mips/lantiq/devices.c
arch/mips/lantiq/prom.c
arch/mips/lantiq/setup.c
arch/mips/lantiq/xway/clk-ase.c
arch/mips/lantiq/xway/clk-xway.c
arch/mips/lantiq/xway/devices.c
arch/mips/lantiq/xway/dma.c
arch/mips/lantiq/xway/gpio.c
arch/mips/lantiq/xway/gpio_ebu.c
arch/mips/lantiq/xway/gpio_stp.c
arch/mips/lantiq/xway/prom-ase.c
arch/mips/lantiq/xway/prom-xway.c
arch/mips/lantiq/xway/reset.c
arch/mips/nxp/pnx8550/common/pci.c [deleted file]
arch/mips/nxp/pnx8550/common/setup.c [deleted file]
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-lantiq.c
arch/mips/pmc-sierra/yosemite/prom.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/boot/dts/p1023rds.dts
arch/powerpc/boot/dts/tqm8548-bigflash.dts
arch/powerpc/boot/dts/tqm8548.dts
arch/powerpc/boot/dts/tqm8xx.dts
arch/powerpc/configs/ppc44x_defconfig
arch/powerpc/include/asm/atomic.h
arch/powerpc/include/asm/bitops.h
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/futex.h
arch/powerpc/include/asm/kvm.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/reg_booke.h
arch/powerpc/include/asm/sections.h
arch/powerpc/include/asm/synch.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/jump_label.c
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/platforms/85xx/Kconfig
arch/powerpc/platforms/85xx/p3060_qds.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/ps3/interrupt.c
arch/powerpc/platforms/ps3/platform.h
arch/powerpc/platforms/ps3/smp.c
arch/powerpc/sysdev/ehv_pic.c
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/qe_lib/qe.c
arch/s390/Kconfig
arch/s390/crypto/crypt_s390.h
arch/s390/include/asm/cputime.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/unistd.h
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/early.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/topology.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/diag.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/sigp.c
arch/s390/mm/fault.c
arch/s390/oprofile/init.c
arch/sh/boards/board-sh7757lcr.c
arch/sh/oprofile/common.c
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/ds.c
arch/sparc/kernel/entry.h
arch/sparc/kernel/module.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/prom_common.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sigutil_64.c
arch/sparc/mm/Makefile
arch/sparc/mm/btfixup.c
arch/sparc/mm/generic_32.c [deleted file]
arch/sparc/mm/generic_64.c [deleted file]
arch/tile/include/asm/irq.h
arch/tile/kernel/irq.c
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/sysfs.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/unicore32/Kconfig
arch/unicore32/Kconfig.debug
arch/unicore32/boot/compressed/Makefile
arch/unicore32/include/asm/bitops.h
arch/unicore32/include/asm/processor.h
arch/unicore32/kernel/ksyms.c
arch/unicore32/lib/findbit.S
arch/x86/Kconfig
arch/x86/include/asm/apic.h
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/mach_traps.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/mrst.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/system.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce-inject.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/nmi.c
arch/x86/kernel/process.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/rtc.c
arch/x86/kernel/setup.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/i8254.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/init.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/mrst/mrst.c
arch/x86/um/asm/processor.h
arch/x86/xen/enlighten.c
arch/x86/xen/grant-table.c
arch/x86/xen/setup.c
block/blk-core.c
block/blk-map.c
block/blk-tag.c
block/cfq-iosched.c
block/genhd.c
drivers/acpi/apei/erst.c
drivers/ata/Kconfig
drivers/ata/ahci_platform.c
drivers/ata/libata-sff.c
drivers/atm/iphase.c
drivers/base/core.c
drivers/base/node.c
drivers/base/power/clock_ops.c
drivers/base/power/main.c
drivers/base/power/qos.c
drivers/block/cciss.c
drivers/block/cciss_scsi.c
drivers/block/loop.c
drivers/block/paride/pg.c
drivers/block/rbd.c
drivers/block/swim3.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/random.c
drivers/crypto/mv_cesa.c
drivers/devfreq/Kconfig
drivers/devfreq/devfreq.c
drivers/dma/Kconfig
drivers/edac/mpc85xx_edac.c
drivers/firmware/dmi_scan.c
drivers/firmware/efivars.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/firmware/sigma.c
drivers/gpio/Makefile
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pl061.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_connector.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/Kconfig
drivers/hwmon/ad7314.c
drivers/hwmon/ads7871.c
drivers/hwmon/exynos4_tmu.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/jz4740-hwmon.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/s3c-hwmon.c
drivers/hwmon/sch5627.c
drivers/hwmon/sch5636.c
drivers/hwmon/twl4030-madc-hwmon.c
drivers/hwmon/ultra45_env.c
drivers/hwmon/wm831x-hwmon.c
drivers/hwmon/wm8350-hwmon.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-nuc900.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/ide/cy82c693.c
drivers/ide/icside.c
drivers/ide/piix.c
drivers/ide/triflex.c
drivers/ieee802154/fakehard.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/input/misc/cma3000_d0x.c
drivers/input/mouse/elantech.c
drivers/input/mouse/sentelic.c
drivers/input/mouse/sentelic.h
drivers/input/mouse/synaptics.c
drivers/input/serio/ams_delta_serio.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/iommu/intel-iommu.c
drivers/iommu/intr_remapping.c
drivers/iommu/iommu.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/gigaset/i4l.c
drivers/isdn/i4l/isdn_net.c
drivers/leds/led-class.c
drivers/lguest/lguest_device.c
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/common/tuners/mxl5007t.c
drivers/media/common/tuners/tda18218.c
drivers/media/rc/ati_remote.c
drivers/media/rc/keymaps/rc-ati-x10.c
drivers/media/rc/keymaps/rc-medion-x10.c
drivers/media/rc/keymaps/rc-snapstream-firefly.c
drivers/media/video/au0828/au0828-cards.c
drivers/media/video/gspca/gspca.c
drivers/media/video/m5mols/m5mols.h
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9t112.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap1_camera.c
drivers/media/video/omap24xxcam-dma.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/media/video/omap3isp/ispvideo.c
drivers/media/video/ov6650.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-core.h
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-reg.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-tv/mixer_video.c
drivers/media/video/sh_mobile_ceu_camera.c
drivers/media/video/sh_mobile_csi2.c
drivers/media/video/soc_camera.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/misc/Kconfig
drivers/misc/ad525x_dpot.h
drivers/misc/carma/carma-fpga-program.c
drivers/misc/carma/carma-fpga.c
drivers/misc/eeprom/Kconfig
drivers/misc/eeprom/eeprom_93cx6.c
drivers/misc/pch_phub.c
drivers/misc/sgi-xp/xpnet.c
drivers/misc/spear13xx_pcie_gadget.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-cns3xxx.c
drivers/mmc/host/sdhci-dove.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-of-hlwd.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-pxav2.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/ndfc.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/arcnet/Kconfig
drivers/net/bonding/bond_ipv6.c [deleted file]
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_shmcore.c
drivers/net/caif/caif_spi.c
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/Kconfig [new file with mode: 0644]
drivers/net/can/cc770/Makefile [new file with mode: 0644]
drivers/net/can/cc770/cc770.c [new file with mode: 0644]
drivers/net/can/cc770/cc770.h [new file with mode: 0644]
drivers/net/can/cc770/cc770_isa.c [new file with mode: 0644]
drivers/net/can/cc770/cc770_platform.c [new file with mode: 0644]
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/mscan/mscan.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/can/vcan.c
drivers/net/dsa/Kconfig [new file with mode: 0644]
drivers/net/dsa/Makefile [new file with mode: 0644]
drivers/net/dsa/mv88e6060.c [moved from net/dsa/mv88e6060.c with 96% similarity]
drivers/net/dsa/mv88e6123_61_65.c [moved from net/dsa/mv88e6123_61_65.c with 96% similarity]
drivers/net/dsa/mv88e6131.c [moved from net/dsa/mv88e6131.c with 96% similarity]
drivers/net/dsa/mv88e6xxx.c [moved from net/dsa/mv88e6xxx.c with 93% similarity]
drivers/net/dsa/mv88e6xxx.h [moved from net/dsa/mv88e6xxx.h with 95% similarity]
drivers/net/dummy.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/8390.h
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/8390/es3210.c
drivers/net/ethernet/8390/hp-plus.c
drivers/net/ethernet/8390/hp.c
drivers/net/ethernet/8390/hydra.c
drivers/net/ethernet/8390/lne390.c
drivers/net/ethernet/8390/ne-h8300.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/8390/ne2.c
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/8390/ne3210.c
drivers/net/ethernet/8390/stnic.c
drivers/net/ethernet/8390/zorro8390.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/amd8111e.h
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/sunlance.c
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/atheros/atlx/atlx.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/Makefile
drivers/net/ethernet/brocade/bna/bfa_cee.c
drivers/net/ethernet/brocade/bna/bfa_cee.h
drivers/net/ethernet/brocade/bna/bfa_defs.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc.h
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/brocade/bna/bna_types.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_debugfs.c [new file with mode: 0644]
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/brocade/bna/cna.h
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/calxeda/Kconfig [new file with mode: 0644]
drivers/net/ethernet/calxeda/Makefile [new file with mode: 0644]
drivers/net/ethernet/calxeda/xgmac.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb/sge.h
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb3/l2t.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dlink/de600.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/i825xx/eepro.c
drivers/net/ethernet/ibm/ehea/ehea.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/iseries_veth.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/regs.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/korina.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/marvell/sky2.h
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_port.h
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/icm.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/sense.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/micrel/Kconfig
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
drivers/net/ethernet/natsemi/jazzsonic.c
drivers/net/ethernet/natsemi/macsonic.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
drivers/net/ethernet/pasemi/Makefile
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/filter.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mtd.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/smsc/smsc911x.h
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c [new file with mode: 0644]
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xircom/xirc2ps_cs.c
drivers/net/hippi/Kconfig
drivers/net/ifb.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/donauboe.c
drivers/net/irda/pxaficp_ir.c
drivers/net/irda/sh_irda.c
drivers/net/irda/sh_sir.c
drivers/net/irda/smsc-ircc2.c
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/mii.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/mdio-bitbang.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/phy/spi_ks8995.c [new file with mode: 0644]
drivers/net/ppp/pptp.c
drivers/net/team/Kconfig [new file with mode: 0644]
drivers/net/team/Makefile [new file with mode: 0644]
drivers/net/team/team.c [new file with mode: 0644]
drivers/net/team/team_mode_activebackup.c [new file with mode: 0644]
drivers/net/team/team_mode_roundrobin.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/usb/asix.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lg-vl600.c
drivers/net/usb/pegasus.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wan/sbni.c
drivers/net/wan/sealevel.c
drivers/net/wimax/i2400m/tx.c
drivers/net/wimax/i2400m/usb-tx.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/leds.c
drivers/net/wireless/b43/lo.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/b43/phy_lp.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/pio.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/leds.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/radio.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmsmac/dma.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/iwmc3200wifi/main.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/libertas/if_cs.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/main.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rayctl.h
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/of/irq.c
drivers/of/platform.c
drivers/oprofile/oprof.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofilefs.c
drivers/oprofile/timer_int.c
drivers/pci/Kconfig
drivers/pci/ats.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/shpchp_core.c
drivers/pci/hotplug/shpchp_hpc.c
drivers/pci/iov.c
drivers/pci/pci.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/power/intel_mid_battery.c
drivers/ptp/ptp_clock.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/regulator/aat2870-regulator.c
drivers/regulator/core.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/twl-regulator.c
drivers/rtc/class.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-puv3.c
drivers/rtc/rtc-s3c.c
drivers/s390/char/zcore.c
drivers/s390/cio/chsc.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/io_sch.h
drivers/s390/crypto/ap_bus.c
drivers/s390/kvm/kvm_virtio.c
drivers/s390/net/Kconfig
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/s390/scsi/zfcp_scsi.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/uctrl.c
drivers/scsi/aacraid/linit.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/hpsa.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/spi/Kconfig
drivers/spi/spi-ath79.c
drivers/spi/spi-gpio.c
drivers/spi/spi-nuc900.c
drivers/spi/spi-pl022.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/usbduxsigma.c
drivers/staging/et131x/Kconfig
drivers/staging/et131x/et131x.c
drivers/staging/iio/industrialio-core.c
drivers/staging/media/as102/as102_drv.c
drivers/staging/media/as102/as102_drv.h
drivers/staging/octeon/ethernet-tx.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rts_pstor/rtsx.c
drivers/staging/slicoss/Kconfig
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/rmgr/drv_interface.c
drivers/staging/usbip/vhci_rx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/tty/hvc/hvc_dcc.c
drivers/tty/serial/Kconfig
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/mfd.c
drivers/tty/serial/pch_uart.c
drivers/tty/tty_ldisc.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/ci13xxx_msm.c
drivers/usb/gadget/ci13xxx_udc.c
drivers/usb/gadget/epautoconf.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_midi.c
drivers/usb/gadget/f_phonet.c
drivers/usb/gadget/f_serial.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/inode.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/pch_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/gadget/udc-core.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-xls.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci.h
drivers/usb/host/pci-quirks.c
drivers/usb/host/whci/qset.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/Kconfig
drivers/usb/musb/am35x.c
drivers/usb/musb/da8xx.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod.h
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/ark3116.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/storage/ene_ub6250.c
drivers/usb/storage/protocol.c
drivers/usb/storage/unusual_devs.h
drivers/video/da8xx-fb.c
drivers/video/omap/dispc.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/hdmi.c
drivers/video/via/share.h
drivers/virtio/Kconfig
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/adx_wdt.c [deleted file]
drivers/watchdog/coh901327_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/s3c2410_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/wm831x_wdt.c
drivers/xen/balloon.c
drivers/xen/gntalloc.c
drivers/xen/gntdev.c
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_xs.c
firmware/README.AddingFirmware
fs/bio.c
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smbencrypt.c
fs/configfs/inode.c
fs/configfs/mount.c
fs/dcache.c
fs/dlm/lowcomms.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ext4/balloc.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/locks.c
fs/minix/bitmap.c
fs/minix/inode.c
fs/minix/minix.h
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/super.c
fs/nilfs2/ioctl.c
fs/ocfs2/alloc.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/netdebug.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/cluster/tcp.h
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmlock.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/extent_map.h
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/inode.h
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/journal.h
fs/ocfs2/mmap.c
fs/ocfs2/move_extents.c
fs/ocfs2/ocfs2.h
fs/ocfs2/quota_local.c
fs/ocfs2/slot_map.c
fs/ocfs2/stack_o2cb.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/proc/meminfo.c
fs/proc/root.c
fs/proc/stat.c
fs/pstore/platform.c
fs/seq_file.c
fs/ubifs/super.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_export.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_sync.h
fs/xfs/xfs_trace.h
include/asm-generic/cputime.h
include/asm-generic/unistd.h
include/drm/drm_mode.h
include/drm/drm_pciids.h
include/drm/exynos_drm.h
include/drm/radeon_drm.h
include/linux/Kbuild
include/linux/atmdev.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/can/platform/cc770.h [new file with mode: 0644]
include/linux/ceph/osd_client.h
include/linux/cgroup_subsys.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/dcache.h
include/linux/device.h
include/linux/dma_remapping.h
include/linux/dynamic_queue_limits.h [new file with mode: 0644]
include/linux/eeprom_93cx6.h
include/linux/errqueue.h
include/linux/ethtool.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/genetlink.h
include/linux/genhd.h
include/linux/hugetlb.h
include/linux/i2c.h
include/linux/if.h
include/linux/if_ether.h
include/linux/if_team.h [new file with mode: 0644]
include/linux/if_vlan.h
include/linux/inet_diag.h
include/linux/init_task.h
include/linux/ipv6.h
include/linux/kvm.h
include/linux/lglock.h
include/linux/lockd/lockd.h
include/linux/log2.h
include/linux/mdio-bitbang.h
include/linux/mdio-gpio.h
include/linux/memcontrol.h
include/linux/mfd/tps65910.h
include/linux/mii.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/neighbour.h
include/linux/netdev_features.h [new file with mode: 0644]
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/Kbuild
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/nf_conntrack_tuple_common.h
include/linux/netfilter/nf_nat.h [new file with mode: 0644]
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_acct.h [new file with mode: 0644]
include/linux/netfilter/xt_CT.h
include/linux/netfilter/xt_ecn.h [new file with mode: 0644]
include/linux/netfilter/xt_nfacct.h [new file with mode: 0644]
include/linux/netfilter/xt_rpfilter.h [new file with mode: 0644]
include/linux/netfilter_ipv4/Kbuild
include/linux/netfilter_ipv4/ipt_ecn.h
include/linux/netfilter_ipv4/nf_nat.h [deleted file]
include/linux/netlink.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/openvswitch.h [new file with mode: 0644]
include/linux/pci-ats.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/phonet.h
include/linux/pkt_sched.h
include/linux/pm.h
include/linux/pm_runtime.h
include/linux/pstore.h
include/linux/sched.h
include/linux/security.h
include/linux/serial.h
include/linux/shrinker.h
include/linux/sigma.h
include/linux/skbuff.h
include/linux/smscphy.h [new file with mode: 0644]
include/linux/sock_diag.h [new file with mode: 0644]
include/linux/sunrpc/clnt.h
include/linux/tcp.h
include/linux/unix_diag.h [new file with mode: 0644]
include/linux/virtio_config.h
include/linux/virtio_mmio.h
include/linux/vmalloc.h
include/media/soc_camera.h
include/net/addrconf.h
include/net/af_unix.h
include/net/arp.h
include/net/atmclip.h
include/net/bluetooth/l2cap.h
include/net/caif/caif_dev.h
include/net/caif/caif_layer.h
include/net/caif/caif_spi.h
include/net/caif/cfcnfg.h
include/net/caif/cfserl.h
include/net/dsa.h
include/net/dst.h
include/net/dst_ops.h
include/net/flow.h
include/net/flow_keys.h [new file with mode: 0644]
include/net/genetlink.h
include/net/icmp.h
include/net/ieee802154.h
include/net/inet6_hashtables.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/iucv/af_iucv.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack_acct.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_tuple.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_nat_protocol.h
include/net/netfilter/nf_tproxy_core.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/mib.h
include/net/netns/xfrm.h
include/net/netprio_cgroup.h [new file with mode: 0644]
include/net/protocol.h
include/net/red.h
include/net/route.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/tcp_memcontrol.h [new file with mode: 0644]
include/net/udp.h
include/net/xfrm.h
include/scsi/libfcoe.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/trace/events/writeback.h
include/video/omapdss.h
include/xen/interface/io/xs_wire.h
include/xen/platform_pci.h
init/Kconfig
ipc/mqueue.c
ipc/msgutil.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/hung_task.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/lockdep.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/printk.c
kernel/ptrace.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
kernel/signal.c
kernel/sysctl_binary.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/time/timekeeping.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
lib/Kconfig
lib/Makefile
lib/dma-debug.c
lib/dynamic_queue_limits.c [new file with mode: 0644]
lib/reciprocal_div.c
lib/vsprintf.c
mm/backing-dev.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/mempolicy.c
mm/migrate.c
mm/nommu.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu-vm.c
mm/percpu.c
mm/slab.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_gvrp.c
net/8021q/vlan_netlink.c
net/8021q/vlanproc.c
net/Kconfig
net/Makefile
net/atm/atm_misc.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/pppoatm.c
net/ax25/af_ax25.c
net/batman-adv/bat_sysfs.c
net/batman-adv/bitarray.c
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/main.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/batman-adv/vis.c
net/bluetooth/bnep/core.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/netfilter/ebt_ip6.c
net/bridge/netfilter/ebt_log.c
net/caif/Kconfig
net/caif/Makefile
net/caif/caif_dev.c
net/caif/caif_usb.c [new file with mode: 0644]
net/caif/cfcnfg.c
net/caif/cffrml.c
net/caif/cfpkt_skbuff.c
net/caif/cfrfml.c
net/caif/cfserl.c
net/ceph/crush/mapper.c
net/ceph/osd_client.c
net/core/Makefile
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dst.c
net/core/ethtool.c
net/core/flow.c
net/core/flow_dissector.c [new file with mode: 0644]
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/netprio_cgroup.c [new file with mode: 0644]
net/core/pktgen.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c [new file with mode: 0644]
net/core/sysctl_net_core.c
net/dccp/ccids/ccid2.c
net/dccp/ccids/ccid3.c
net/dccp/ccids/lib/tfrc.c
net/dccp/ccids/lib/tfrc.h
net/dccp/dccp.h
net/dccp/diag.c
net/dccp/feat.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/options.c
net/dccp/probe.c
net/dccp/proto.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/decnet/dn_timer.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa.c
net/dsa/dsa_priv.h
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_trailer.c
net/econet/af_econet.c
net/ieee802154/6lowpan.c
net/ieee802154/6lowpan.h
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/fib_rules.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_NETMAP.c
net/ipv4/netfilter/ipt_REDIRECT.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_ecn.c [deleted file]
net/ipv4/netfilter/ipt_rpfilter.c [new file with mode: 0644]
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nf_nat_helper.c
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/netfilter/nf_nat_proto_common.c
net/ipv4/netfilter/nf_nat_proto_dccp.c
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nf_nat_proto_sctp.c
net/ipv4/netfilter/nf_nat_proto_tcp.c
net/ipv4/netfilter/nf_nat_proto_udp.c
net/ipv4/netfilter/nf_nat_proto_udplite.c
net/ipv4/netfilter/nf_nat_proto_unknown.c
net/ipv4/netfilter/nf_nat_rule.c
net/ipv4/netfilter/nf_nat_sip.c
net/ipv4/netfilter/nf_nat_standalone.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c [new file with mode: 0644]
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/tunnel4.c
net/ipv4/udp.c
net/ipv4/udp_diag.c [new file with mode: 0644]
net/ipv4/xfrm4_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/anycast.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/exthdrs_core.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6_queue.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_rpfilter.c [new file with mode: 0644]
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_mode_beet.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_state.c
net/irda/af_irda.c
net/irda/irlan/irlan_common.c
net/irda/irttp.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/rc80211_pid_algo.c
net/mac80211/scan.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipvs/Kconfig
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_acct.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_timestamp.c
net/netfilter/nfnetlink_acct.c [new file with mode: 0644]
net/netfilter/xt_AUDIT.c
net/netfilter/xt_CT.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_connbytes.c
net/netfilter/xt_ecn.c [new file with mode: 0644]
net/netfilter/xt_hashlimit.c
net/netfilter/xt_nfacct.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/netlabel/netlabel_addrlist.c
net/netlabel/netlabel_addrlist.h
net/netlabel/netlabel_domainhash.c
net/netlabel/netlabel_domainhash.h
net/netlabel/netlabel_kapi.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/netrom/nr_route.c
net/openvswitch/Kconfig [new file with mode: 0644]
net/openvswitch/Makefile [new file with mode: 0644]
net/openvswitch/actions.c [new file with mode: 0644]
net/openvswitch/datapath.c [new file with mode: 0644]
net/openvswitch/datapath.h [new file with mode: 0644]
net/openvswitch/dp_notify.c [new file with mode: 0644]
net/openvswitch/flow.c [new file with mode: 0644]
net/openvswitch/flow.h [new file with mode: 0644]
net/openvswitch/vport-internal_dev.c [new file with mode: 0644]
net/openvswitch/vport-internal_dev.h [new file with mode: 0644]
net/openvswitch/vport-netdev.c [new file with mode: 0644]
net/openvswitch/vport-netdev.h [new file with mode: 0644]
net/openvswitch/vport.c [new file with mode: 0644]
net/openvswitch/vport.h [new file with mode: 0644]
net/packet/af_packet.c
net/phonet/pep.c
net/rds/Kconfig
net/rfkill/rfkill-regulator.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-key.c
net/rxrpc/ar-output.c
net/sched/cls_flow.c
net/sched/sch_api.c
net/sched/sch_choke.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/socket.c
net/sunrpc/addr.c
net/sunrpc/sched.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/core.c
net/tipc/discover.c
net/tipc/discover.h
net/tipc/eth_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/net.c
net/tipc/node.c
net/tipc/node.h
net/tipc/port.c
net/tipc/port.h
net/tipc/ref.c
net/tipc/socket.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/Kconfig
net/unix/Makefile
net/unix/af_unix.c
net/unix/diag.c [new file with mode: 0644]
net/x25/af_x25.c
net/x25/x25_dev.c
net/x25/x25_route.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/kconfig/Makefile
security/apparmor/path.c
security/integrity/evm/evm_crypto.c
security/keys/encrypted-keys/Makefile
security/keys/encrypted-keys/encrypted.c
security/keys/encrypted-keys/encrypted.h
security/keys/user_defined.c
security/lsm_audit.c
security/security.c
security/selinux/hooks.c
security/selinux/netnode.c
security/selinux/netport.c
security/smack/smackfs.c
security/tomoyo/realpath.c
sound/atmel/ac97c.c
sound/pci/cs5535audio/cs5535audio_pcm.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_eld.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/lx6464es/lx_core.c
sound/pci/lx6464es/lx_core.h
sound/pci/rme9652/hdspm.c
sound/pci/sis7019.c
sound/soc/atmel/Kconfig
sound/soc/atmel/Makefile
sound/soc/atmel/playpaq_wm8510.c [deleted file]
sound/soc/codecs/Kconfig
sound/soc/codecs/ad1836.h
sound/soc/codecs/adau1373.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/jz4740.c
sound/soc/codecs/max9877.c
sound/soc/codecs/rt5631.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sta32x.c
sound/soc/codecs/sta32x.h
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8776.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8993.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/codecs/wm9081.c
sound/soc/codecs/wm9090.c
sound/soc/codecs/wm_hubs.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/imx/Kconfig
sound/soc/kirkwood/Kconfig
sound/soc/mxs/mxs-pcm.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/nuc900/nuc900-ac97.c
sound/soc/pxa/Kconfig
sound/soc/pxa/hx4700.c
sound/soc/samsung/jive_wm8750.c
sound/soc/samsung/smdk2443_wm9710.c
sound/soc/samsung/smdk_wm8994.c
sound/soc/samsung/speyside.c
sound/soc/soc-core.c
sound/soc/soc-utils.c
sound/usb/quirks-table.h
tools/perf/builtin-stat.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/session.c
tools/perf/util/trace-event-parse.c
tools/testing/ktest/ktest.pl
virt/kvm/assigned-dev.c

diff --git a/CREDITS b/CREDITS
index 07e32a87d956808fbb8b979fb38d434d93c879fc..44fce988eaac8cd22bfe5a5e753ae1bb58b3476d 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30  1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E  6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
index 2b5d56127fce4d7f9a6f83b535cdfcf469f83e0b..c1eb41cb9876083d3df79a6a995b692762acd21b 100644 (file)
@@ -206,16 +206,3 @@ Description:
                when a discarded area is read the discard_zeroes_data
                parameter will be set to one. Otherwise it will be 0 and
                the result of reading a discarded area is undefined.
-What:          /sys/block/<disk>/alias
-Date:          Aug 2011
-Contact:       Nao Nishijima <nao.nishijima.xt@hitachi.com>
-Description:
-               A raw device name of a disk does not always point a same disk
-               each boot-up time. Therefore, users have to use persistent
-               device names, which udev creates when the kernel finds a disk,
-               instead of raw device name. However, kernel doesn't show those
-               persistent names on its messages (e.g. dmesg).
-               This file can store an alias of the disk and it would be
-               appeared in kernel messages if it is set. A disk can have an
-               alias which length is up to 255bytes. Users can use alphabets,
-               numbers, "-" and "_" in alias name. This file is writeonce.
index fa72ccb2282e77c879c0a7a135f6d86828a143cc..dbedafb095e24d3d3a8e2d93b6cbd727268d1754 100644 (file)
@@ -57,13 +57,6 @@ create_snap
 
         $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-       Rolls back data to the specified snapshot. This goes over the entire
-       list of rados blocks and sends a rollback command to each.
-
-        $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
        A directory per each snapshot
index 54883de5d5f9b598ab981de82269c908dec86114..ac3d0018140cd34cd7dd242334de64713406c6b6 100644 (file)
@@ -520,6 +520,11 @@ Here's a description of the fields of <varname>struct uio_mem</varname>:
 </para>
 
 <itemizedlist>
+<listitem><para>
+<varname>const char *name</varname>: Optional. Set this to help identify
+the memory region, it will show up in the corresponding sysfs node.
+</para></listitem>
+
 <listitem><para>
 <varname>int memtype</varname>: Required if the mapping is used. Set this to
 <varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
@@ -553,7 +558,7 @@ instead to remember such an address.
 </itemizedlist>
 
 <para>
-Please do not touch the <varname>kobj</varname> element of
+Please do not touch the <varname>map</varname> element of
 <varname>struct uio_mem</varname>! It is used by the UIO framework
 to set up sysfs files for this mapping. Simply leave it alone.
 </para>
index 71464e09ec1841a39f031d972b281488e5d703af..b79d0a13e7cddf2512ef29069d60a54bf0399b02 100644 (file)
@@ -98,14 +98,12 @@ You must enable "SCSI tape drive support for Smart Array 5xxx" and
 "SCSI support" in your kernel configuration to be able to use SCSI
 tape drives with your Smart Array 5xxx controller.
 
-Additionally, note that the driver will not engage the SCSI core at init 
-time.  The driver must be directed to dynamically engage the SCSI core via 
-the /proc filesystem entry which the "block" side of the driver creates as 
-/proc/driver/cciss/cciss* at runtime.  This is because at driver init time, 
-the SCSI core may not yet be initialized (because the driver is a block 
-driver) and attempting to register it with the SCSI core in such a case 
-would cause a hang.  This is best done via an initialization script 
-(typically in /etc/init.d, but could vary depending on distribution). 
+Additionally, note that the driver will engage the SCSI core at init
+time if any tape drives or medium changers are detected.  The driver may
+also be directed to dynamically engage the SCSI core via the /proc filesystem
+entry which the "block" side of the driver creates as
+/proc/driver/cciss/cciss* at runtime.  This is best done via a script.
+
 For example:
 
        for x in /proc/driver/cciss/cciss[0-9]*
index cc0ebc5241b39f2e9513d89a39d880771cab9c67..4d8774f6f48abd7aaf59e48335a1d0b9415832cd 100644 (file)
@@ -44,8 +44,8 @@ Features:
  - oom-killer disable knob and oom-notifier
  - Root cgroup has no limit controls.
 
- Kernel memory and Hugepages are not under control yet. We just manage
- pages on LRU. To add more controls, we have to take care of performance.
+ Kernel memory support is work in progress, and the current version provides
+ basic functionality. (See Section 2.7)
 
 Brief summary of control files.
 
@@ -72,6 +72,9 @@ Brief summary of control files.
  memory.oom_control             # set/show oom controls.
  memory.numa_stat               # show the number of memory usage per numa node
 
+ memory.kmem.tcp.limit_in_bytes  # set/show hard limit for tcp buf memory
+ memory.kmem.tcp.usage_in_bytes  # show current tcp buf memory allocation
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -255,6 +258,27 @@ When oom event notifier is registered, event will be delivered.
   per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
   zone->lru_lock, it has no lock of its own.
 
+2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+
+With the Kernel memory extension, the Memory Controller is able to limit
+the amount of kernel memory used by the system. Kernel memory is fundamentally
+different than user memory, since it can't be swapped out, which makes it
+possible to DoS the system by consuming too much of this precious resource.
+
+Kernel memory limits are not imposed for the root cgroup. Usage for the root
+cgroup may or may not be accounted.
+
+Currently no soft limit is implemented for kernel memory. It is future work
+to trigger slab reclaim when those limits are reached.
+
+2.7.1 Current Kernel Memory resources accounted
+
+* sockets memory pressure: some sockets protocols have memory pressure
+thresholds. The Memory Controller allows them to be controlled individually
+per cgroup, instead of globally.
+
+* tcp memory pressure: sockets memory pressure for the tcp protocol.
+
 3. User Interface
 
 0. Configuration
diff --git a/Documentation/cgroups/net_prio.txt b/Documentation/cgroups/net_prio.txt
new file mode 100644 (file)
index 0000000..01b3226
--- /dev/null
@@ -0,0 +1,53 @@
+Network priority cgroup
+-------------------------
+
+The Network priority cgroup provides an interface to allow an administrator to
+dynamically set the priority of network traffic generated by various
+applications
+
+Nominally, an application would set the priority of its traffic via the
+SO_PRIORITY socket option.  This however, is not always possible because:
+
+1) The application may not have been coded to set this value
+2) The priority of application traffic is often a site-specific administrative
+   decision rather than an application defined one.
+
+This cgroup allows an administrator to assign a process to a group which defines
+the priority of egress traffic on a given interface. Network priority groups can
+be created by first mounting the cgroup filesystem.
+
+# mount -t cgroup -onet_prio none /sys/fs/cgroup/net_prio
+
+With the above step, the initial group acting as the parent accounting group
+becomes visible at '/sys/fs/cgroup/net_prio'.  This group includes all tasks in
+the system. '/sys/fs/cgroup/net_prio/tasks' lists the tasks in this cgroup.
+
+Each net_prio cgroup contains two files that are subsystem specific
+
+net_prio.prioidx
+This file is read-only, and is simply informative.  It contains a unique integer
+value that the kernel uses as an internal representation of this cgroup.
+
+net_prio.ifpriomap
+This file contains a map of the priorities assigned to traffic originating from
+processes in this group and egressing the system on various interfaces. It
+contains a list of tuples in the form <ifname priority>.  Contents of this file
+can be modified by echoing a string into the file using the same tuple format.
+for example:
+
+echo "eth0 5" > /sys/fs/cgroups/net_prio/iscsi/net_prio.ifpriomap
+
+This command would force any traffic originating from processes belonging to the
+iscsi net_prio cgroup and egressing on interface eth0 to have the priority of
+said traffic set to the value 5. The parent accounting group also has a
+writeable 'net_prio.ifpriomap' file that can be used to set a system default
+priority.
+
+Priorities are set immediately prior to queueing a frame to the device
+queueing discipline (qdisc) so priorities will be assigned prior to the hardware
+queue selection being made.
+
+One usage for the net_prio cgroup is with mqprio qdisc allowing application
+traffic to be steered to hardware/driver based traffic classes. These mappings
+can then be managed by administrators or other networking protocols such as
+DCBX.
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
new file mode 100644 (file)
index 0000000..411727a
--- /dev/null
@@ -0,0 +1,15 @@
+* Calxeda Highbank 10Gb XGMAC Ethernet
+
+Required properties:
+- compatible : Should be "calxeda,hb-xgmac"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain 3 xgmac interrupts. The 1st is main interrupt.
+  The 2nd is pwr mgt interrupt. The 3rd is low power state interrupt.
+
+Example:
+
+ethernet@fff50000 {
+        compatible = "calxeda,hb-xgmac";
+        reg = <0xfff50000 0x1000>;
+        interrupts = <0 77 4  0 78 4  0 79 4>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/cc770.txt b/Documentation/devicetree/bindings/net/can/cc770.txt
new file mode 100644 (file)
index 0000000..77027bf
--- /dev/null
@@ -0,0 +1,53 @@
+Memory mapped Bosch CC770 and Intel AN82527 CAN controller
+
+Note: The CC770 is a CAN controller from Bosch, which is 100%
+compatible with the old AN82527 from Intel, but with "bugs" being fixed.
+
+Required properties:
+
+- compatible : should be "bosch,cc770" for the CC770 and "intc,82527"
+       for the AN82527.
+
+- reg : should specify the chip select, address offset and size required
+       to map the registers of the controller. The size is usually 0x80.
+
+- interrupts : property with a value describing the interrupt source
+       (number and sensitivity) required for the controller.
+
+Optional properties:
+
+- bosch,external-clock-frequency : frequency of the external oscillator
+       clock in Hz. Note that the internal clock frequency used by the
+       controller is half of that value. If not specified, a default
+       value of 16000000 (16 MHz) is used.
+
+- bosch,clock-out-frequency : clock frequency in Hz on the CLKOUT pin.
+       If not specified or if the specified value is 0, the CLKOUT pin
+       will be disabled.
+
+- bosch,slew-rate : slew rate of the CLKOUT signal. If not specified,
+       a reasonable value will be calculated.
+
+- bosch,disconnect-rx0-input : see data sheet.
+
+- bosch,disconnect-rx1-input : see data sheet.
+
+- bosch,disconnect-tx1-output : see data sheet.
+
+- bosch,polarity-dominant : see data sheet.
+
+- bosch,divide-memory-clock : see data sheet.
+
+- bosch,iso-low-speed-mux : see data sheet.
+
+For further information, please have a look to the CC770 or AN82527.
+
+Examples:
+
+can@3,100 {
+       compatible = "bosch,cc770";
+       reg = <3 0x100 0x80>;
+       interrupts = <2 0>;
+       interrupt-parent = <&mpic>;
+       bosch,external-clock-frequency = <16000000>;
+};
index e8552782b440af99ed14a6e851e3db5ed47d05fd..874921e97802d1d8c0e7cc2b694cca4bc3f18bf4 100644 (file)
@@ -33,6 +33,7 @@ qcom  Qualcomm, Inc.
 ramtron        Ramtron International
 samsung        Samsung Semiconductor
 schindler      Schindler
+sil    Silicon Image
 simtek
 sirf   SiRF Technology, Inc.
 stericsson     ST-Ericsson
index 64087c34327fe0ba11e790e0a41224b8e7c1d30c..7671352216f1369d8d3c7dd02f9ae06fd9f90c87 100644 (file)
@@ -63,8 +63,8 @@ IRC network.
 Userspace tools for creating and manipulating Btrfs file systems are
 available from the git repository at the following location:
 
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
 
 These include the following tools:
 
index e9890709c508b25ed9878ab979797d1fc58a7a4b..cdfe13901b99cb64a9bbc2484b13ca7cd174e878 100644 (file)
@@ -1,22 +1,24 @@
 The I2C protocol knows about two kinds of device addresses: normal 7 bit
 addresses, and an extended set of 10 bit addresses. The sets of addresses
 do not intersect: the 7 bit address 0x10 is not the same as the 10 bit
-address 0x10 (though a single device could respond to both of them). You
-select a 10 bit address by adding an extra byte after the address
-byte:
-  S Addr7 Rd/Wr ....
-becomes
-  S 11110 Addr10 Rd/Wr
-S is the start bit, Rd/Wr the read/write bit, and if you count the number
-of bits, you will see the there are 8 after the S bit for 7 bit addresses,
-and 16 after the S bit for 10 bit addresses.
+address 0x10 (though a single device could respond to both of them).
 
-WARNING! The current 10 bit address support is EXPERIMENTAL. There are
-several places in the code that will cause SEVERE PROBLEMS with 10 bit
-addresses, even though there is some basic handling and hooks. Also,
-almost no supported adapter handles the 10 bit addresses correctly.
+I2C messages to and from 10-bit address devices have a different format.
+See the I2C specification for the details.
 
-As soon as a real 10 bit address device is spotted 'in the wild', we
-can and will add proper support. Right now, 10 bit address devices
-are defined by the I2C protocol, but we have never seen a single device
-which supports them.
+The current 10 bit address support is minimal. It should work, however
+you can expect some problems along the way:
+* Not all bus drivers support 10-bit addresses. Some don't because the
+  hardware doesn't support them (SMBus doesn't require 10-bit address
+  support for example), some don't because nobody bothered adding the
+  code (or it's there but not working properly.) Software implementation
+  (i2c-algo-bit) is known to work.
+* Some optional features do not support 10-bit addresses. This is the
+  case of automatic detection and instantiation of devices by their
+  drivers, for example.
+* Many user-space packages (for example i2c-tools) lack support for
+  10-bit addresses.
+
+Note that 10-bit address devices are still pretty rare, so the limitations
+listed above could stay for a long time, maybe even forever if nobody
+needs them to be fixed.
index a0c5c5f4fce6e9587346a4a049c9725e5ca45de5..81c287fad79d6370d0d697d5ddf33b8af756a036 100644 (file)
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        CPU-intensive style benchmark, and it can vary highly in
                        a microbenchmark depending on workload and compiler.
 
-                       1: only for 32-bit processes
-                       2: only for 64-bit processes
+                       32: only for 32-bit processes
+                       64: only for 64-bit processes
                        on: enable for both 32- and 64-bit processes
                        off: disable for both 32- and 64-bit processes
 
-       amd_iommu=      [HW,X86-84]
+       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
                        fullflush - enable flushing of IO/TLB entries when
index bbce1215434a9e70797ce679bb6a15c5c2aa29ab..9ad9ddeb384cc9048b8fe837414b29626fc9d769 100644 (file)
@@ -144,6 +144,8 @@ nfc.txt
        - The Linux Near Field Communication (NFS) subsystem.
 olympic.txt
        - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
+openvswitch.txt
+       - Open vSwitch developer documentation.
 operstates.txt
        - Overview of network interface operational states.
 packet_mmap.txt
index c86d03f18a5be6c70c97f2fadacd0cb93f66f800..221ad0cdf11f2fb729b1fe2bc624b30e53d996be 100644 (file)
@@ -200,15 +200,16 @@ abled  during run time. Following log_levels are defined:
 
 0 - All  debug  output  disabled
 1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or tt entry added / changed / deleted
-3 - Enable all messages
+2 - Enable messages related to route added / changed / deleted
+4 - Enable messages related to translation table operations
+7 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
 # echo 2 > /sys/class/net/bat0/mesh/log_level
 
-will enable debug messages for when routes or TTs change.
+will enable debug messages for when routes change.
 
 
 BATCTL
index 91df678fb7f88428f3989da217647d676e7121de..080ad26690ae436a6981883cb48b0a624f63f478 100644 (file)
@@ -196,6 +196,23 @@ or, for backwards compatibility, the option value.  E.g.,
 
        The parameters are as follows:
 
+active_slave
+
+       Specifies the new active slave for modes that support it
+       (active-backup, balance-alb and balance-tlb).  Possible values
+       are the name of any currently enslaved interface, or an empty
+       string.  If a name is given, the slave and its link must be up in order
+       to be selected as the new active slave.  If an empty string is
+       specified, the current active slave is cleared, and a new active
+       slave is selected automatically.
+
+       Note that this is only available through the sysfs interface. No module
+       parameter by this name exists.
+
+       The normal value of this option is the name of the currently
+       active slave, or the empty string if there is no active slave or
+       the current mode does not use an active slave.
+
 ad_select
 
        Specifies the 802.3ad aggregation selection logic to use.  The
index f41ea24052206d7657ee8c10b90d857bdb1e373a..1dc1c24a7547019c1ade5232eb88780abf358c11 100644 (file)
@@ -78,3 +78,30 @@ in software. This is currently WIP.
 
 See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
 
+6LoWPAN Linux implementation
+============================
+
+The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+octets of actual MAC payload once security is turned on, on a wireless link
+with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
+[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
+taking into account limited bandwidth, memory, or energy resources that are
+expected in applications such as wireless Sensor Networks.  [RFC4944] defines
+a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
+to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
+compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
+relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+
+In September 2011 the standard update was published - [RFC6282].
+It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
+used in this Linux implementation.
+
+All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.*
+
+To setup 6lowpan interface you need (busybox release > 1.17.0):
+1. Add IEEE802.15.4 interface and initialize PANid;
+2. Add 6lowpan interface by command like:
+   # ip link add link wpan0 name lowpan0 type lowpan
+3. Set MAC (if needs):
+   # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
+4. Bring up 'lowpan0' interface
index 65968fbf1e49cb3b35d64da63365cc7e9c085601..ac5debb2f16c35a048a173811f1846364f2c6585 100644 (file)
@@ -539,12 +539,14 @@ static int if_getconfig(char *ifname)
                metric = 0;
        } else
                metric = ifr.ifr_metric;
+       printf("The result of SIOCGIFMETRIC is %d\n", metric);
 
        strcpy(ifr.ifr_name, ifname);
        if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0)
                mtu = 0;
        else
                mtu = ifr.ifr_mtu;
+       printf("The result of SIOCGIFMTU is %d\n", mtu);
 
        strcpy(ifr.ifr_name, ifname);
        if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) {
index cb7f3148035dbeaabbcc514ddf53705733321d66..ad3e80e17b4f49a287c1d97d39e38cd8b092aa86 100644 (file)
@@ -20,7 +20,7 @@ ip_no_pmtu_disc - BOOLEAN
        default FALSE
 
 min_pmtu - INTEGER
-       default 562 - minimum discovered Path MTU
+       default 552 - minimum discovered Path MTU
 
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
@@ -31,6 +31,16 @@ neigh/default/gc_thresh3 - INTEGER
        when using large numbers of interfaces and when communicating
        with large numbers of directly-connected peers.
 
+neigh/default/unres_qlen_bytes - INTEGER
+       The maximum number of bytes which may be used by packets
+       queued for each unresolved address by other network layers.
+       (added in linux 3.3)
+
+neigh/default/unres_qlen - INTEGER
+       The maximum number of packets which may be queued for each
+       unresolved address by other network layers.
+       (deprecated in linux 3.3) : use unres_qlen_bytes instead.
+
 mtu_expires - INTEGER
        Time, in seconds, that cached PMTU information is kept.
 
@@ -165,6 +175,9 @@ tcp_congestion_control - STRING
        connections. The algorithm "reno" is always available, but
        additional choices may be available based on kernel configuration.
        Default is set as part of kernel configuration.
+       For passive connections, the listener congestion control choice
+       is inherited.
+       [see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
 
 tcp_cookie_size - INTEGER
        Default size of TCP Cookie Transactions (TCPCT) option, that may be
@@ -282,11 +295,11 @@ tcp_max_ssthresh - INTEGER
        Default: 0 (off)
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which are
-       still did not receive an acknowledgment from connecting client.
-       Default value is 1024 for systems with more than 128Mb of memory,
-       and 128 for low memory machines. If server suffers of overload,
-       try to increase this number.
+       Maximal number of remembered connection requests, which have not
+       received an acknowledgment from connecting client.
+       The minimal value is 128 for low memory machines, and it will
+       increase in proportion to the memory of machine.
+       If server suffers from overload, try increasing this number.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
new file mode 100644 (file)
index 0000000..b8a048b
--- /dev/null
@@ -0,0 +1,195 @@
+Open vSwitch datapath developer documentation
+=============================================
+
+The Open vSwitch kernel module allows flexible userspace control over
+flow-level packet processing on selected network devices.  It can be
+used to implement a plain Ethernet switch, network device bonding,
+VLAN processing, network access control, flow-based network control,
+and so on.
+
+The kernel module implements multiple "datapaths" (analogous to
+bridges), each of which can have multiple "vports" (analogous to ports
+within a bridge).  Each datapath also has associated with it a "flow
+table" that userspace populates with "flows" that map from keys based
+on packet headers and metadata to sets of actions.  The most common
+action forwards the packet to another vport; other actions are also
+implemented.
+
+When a packet arrives on a vport, the kernel module processes it by
+extracting its flow key and looking it up in the flow table.  If there
+is a matching flow, it executes the associated actions.  If there is
+no match, it queues the packet to userspace for processing (as part of
+its processing, userspace will likely set up a flow to handle further
+packets of the same type entirely in-kernel).
+
+
+Flow key compatibility
+----------------------
+
+Network protocols evolve over time.  New protocols become important
+and existing protocols lose their prominence.  For the Open vSwitch
+kernel module to remain relevant, it must be possible for newer
+versions to parse additional protocols as part of the flow key.  It
+might even be desirable, someday, to drop support for parsing
+protocols that have become obsolete.  Therefore, the Netlink interface
+to Open vSwitch is designed to allow carefully written userspace
+applications to work with any version of the flow key, past or future.
+
+To support this forward and backward compatibility, whenever the
+kernel module passes a packet to userspace, it also passes along the
+flow key that it parsed from the packet.  Userspace then extracts its
+own notion of a flow key from the packet and compares it against the
+kernel-provided version:
+
+    - If userspace's notion of the flow key for the packet matches the
+      kernel's, then nothing special is necessary.
+
+    - If the kernel's flow key includes more fields than the userspace
+      version of the flow key, for example if the kernel decoded IPv6
+      headers but userspace stopped at the Ethernet type (because it
+      does not understand IPv6), then again nothing special is
+      necessary.  Userspace can still set up a flow in the usual way,
+      as long as it uses the kernel-provided flow key to do it.
+
+    - If the userspace flow key includes more fields than the
+      kernel's, for example if userspace decoded an IPv6 header but
+      the kernel stopped at the Ethernet type, then userspace can
+      forward the packet manually, without setting up a flow in the
+      kernel.  This case is bad for performance because every packet
+      that the kernel considers part of the flow must go to userspace,
+      but the forwarding behavior is correct.  (If userspace can
+      determine that the values of the extra fields would not affect
+      forwarding behavior, then it could set up a flow anyway.)
+
+How flow keys evolve over time is important to making this work, so
+the following sections go into detail.
+
+
+Flow key format
+---------------
+
+A flow key is passed over a Netlink socket as a sequence of Netlink
+attributes.  Some attributes represent packet metadata, defined as any
+information about a packet that cannot be extracted from the packet
+itself, e.g. the vport on which the packet was received.  Most
+attributes, however, are extracted from headers within the packet,
+e.g. source and destination addresses from Ethernet, IP, or TCP
+headers.
+
+The <linux/openvswitch.h> header file defines the exact format of the
+flow key attributes.  For informal explanatory purposes here, we write
+them as comma-separated strings, with parentheses indicating arguments
+and nesting.  For example, the following could represent a flow key
+corresponding to a TCP packet that arrived on vport 1:
+
+    in_port(1), eth(src=e0:91:f5:21:d0:b2, dst=00:02:e3:0f:80:a4),
+    eth_type(0x0800), ipv4(src=172.16.0.20, dst=172.18.0.52, proto=17, tos=0,
+    frag=no), tcp(src=49163, dst=80)
+
+Often we ellipsize arguments not important to the discussion, e.g.:
+
+    in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
+
+
+Basic rule for evolving flow keys
+---------------------------------
+
+Some care is needed to really maintain forward and backward
+compatibility for applications that follow the rules listed under
+"Flow key compatibility" above.
+
+The basic rule is obvious:
+
+    ------------------------------------------------------------------
+    New network protocol support must only supplement existing flow
+    key attributes.  It must not change the meaning of already defined
+    flow key attributes.
+    ------------------------------------------------------------------
+
+This rule does have less-obvious consequences so it is worth working
+through a few examples.  Suppose, for example, that the kernel module
+did not already implement VLAN parsing.  Instead, it just interpreted
+the 802.1Q TPID (0x8100) as the Ethertype then stopped parsing the
+packet.  The flow key for any packet with an 802.1Q header would look
+essentially like this, ignoring metadata:
+
+    eth(...), eth_type(0x8100)
+
+Naively, to add VLAN support, it makes sense to add a new "vlan" flow
+key attribute to contain the VLAN tag, then continue to decode the
+encapsulated headers beyond the VLAN tag using the existing field
+definitions.  With this change, a TCP packet in VLAN 10 would have a
+flow key much like this:
+
+    eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...)
+
+But this change would negatively affect a userspace application that
+has not been updated to understand the new "vlan" flow key attribute.
+The application could, following the flow compatibility rules above,
+ignore the "vlan" attribute that it does not understand and therefore
+assume that the flow contained IP packets.  This is a bad assumption
+(the flow only contains IP packets if one parses and skips over the
+802.1Q header) and it could cause the application's behavior to change
+across kernel versions even though it follows the compatibility rules.
+
+The solution is to use a set of nested attributes.  This is, for
+example, why 802.1Q support uses nested attributes.  A TCP packet in
+VLAN 10 is actually expressed as:
+
+    eth(...), eth_type(0x8100), vlan(vid=10, pcp=0), encap(eth_type(0x0800),
+    ip(proto=6, ...), tcp(...)))
+
+Notice how the "eth_type", "ip", and "tcp" flow key attributes are
+nested inside the "encap" attribute.  Thus, an application that does
+not understand the "vlan" key will not see either of those attributes
+and therefore will not misinterpret them.  (Also, the outer eth_type
+is still 0x8100, not changed to 0x0800.)
+
+Handling malformed packets
+--------------------------
+
+Don't drop packets in the kernel for malformed protocol headers, bad
+checksums, etc.  This would prevent userspace from implementing a
+simple Ethernet switch that forwards every packet.
+
+Instead, in such a case, include an attribute with "empty" content.
+It doesn't matter if the empty content could be valid protocol values,
+as long as those values are rarely seen in practice, because userspace
+can always forward all packets with those values to userspace and
+handle them individually.
+
+For example, consider a packet that contains an IP header that
+indicates protocol 6 for TCP, but which is truncated just after the IP
+header, so that the TCP header is missing.  The flow key for this
+packet would include a tcp attribute with all-zero src and dst, like
+this:
+
+    eth(...), eth_type(0x0800), ip(proto=6, ...), tcp(src=0, dst=0)
+
+As another example, consider a packet with an Ethernet type of 0x8100,
+indicating that a VLAN TCI should follow, but which is truncated just
+after the Ethernet type.  The flow key for this packet would include
+an all-zero-bits vlan and an empty encap attribute, like this:
+
+    eth(...), eth_type(0x8100), vlan(0), encap()
+
+Unlike a TCP packet with source and destination ports 0, an
+all-zero-bits VLAN TCI is not that rare, so the CFI bit (aka
+VLAN_TAG_PRESENT inside the kernel) is ordinarily set in a vlan
+attribute expressly to allow this situation to be distinguished.
+Thus, the flow key in this second example unambiguously indicates a
+missing or malformed VLAN TCI.
+
+Other rules
+-----------
+
+The other rules for flow keys are much less subtle:
+
+    - Duplicate attributes are not allowed at a given nesting level.
+
+    - Ordering of attributes is not significant.
+
+    - When the kernel sends a given flow key to userspace, it always
+      composes it the same way.  This allows userspace to hash and
+      compare entire flow keys that it may not be able to fully
+      interpret.
index 4acea660372011ab7ae95d42cc84decc55d4cc22..1c08a4b0981fb7f648cee60ffea8b3c45bb319c4 100644 (file)
@@ -155,7 +155,7 @@ As capture, each frame contains two parts:
 
  /* fill sockaddr_ll struct to prepare binding */
  my_addr.sll_family = AF_PACKET;
- my_addr.sll_protocol = ETH_P_ALL;
+ my_addr.sll_protocol = htons(ETH_P_ALL);
  my_addr.sll_ifindex =  s_ifr.ifr_ifindex;
 
  /* bind socket to eth0 */
index a177de21d28e5545bfc89b8569226295e1c80b34..579994afbe067bf9bf6d79bf50c62986dda2765d 100644 (file)
@@ -208,7 +208,7 @@ The counter in rps_dev_flow_table values records the length of the current
 CPU's backlog when a packet in this flow was last enqueued. Each backlog
 queue has a head counter that is incremented on dequeue. A tail counter
 is computed as head counter + queue length. In other words, the counter
-in rps_dev_flow_table[i] records the last element in flow i that has
+in rps_dev_flow[i] records the last element in flow i that has
 been enqueued onto the currently designated CPU for flow i (of course,
 entry i is actually selected by hash and multiple flows may hash to the
 same entry i).
@@ -224,7 +224,7 @@ following is true:
 
 - The current CPU's queue head counter >= the recorded tail counter
   value in rps_dev_flow[i]
-- The current CPU is unset (equal to NR_CPUS)
+- The current CPU is unset (equal to RPS_NO_CPU)
 - The current CPU is offline
 
 After this check, the packet is sent to the (possibly updated) current
@@ -235,7 +235,7 @@ CPU.
 
 ==== RFS Configuration
 
-RFS is only available if the kconfig symbol CONFIG_RFS is enabled (on
+RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
 by default for SMP). The functionality remains disabled until explicitly
 configured. The number of entries in the global flow table is set through:
 
@@ -258,7 +258,7 @@ For a single queue device, the rps_flow_cnt value for the single queue
 would normally be configured to the same value as rps_sock_flow_entries.
 For a multi-queue device, the rps_flow_cnt for each queue might be
 configured as rps_sock_flow_entries / N, where N is the number of
-queues. So for instance, if rps_flow_entries is set to 32768 and there
+queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
 are 16 configured receive queues, rps_flow_cnt for each queue might be
 configured as 2048.
 
index 8d67980fabe8d2a23affc6513523046f70acad42..d0aeeadd264b4aec82774b095fc9efddb369eb94 100644 (file)
@@ -4,14 +4,16 @@ Copyright (C) 2007-2010  STMicroelectronics Ltd
 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
 This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
-(Synopsys IP blocks); it has been fully tested on STLinux platforms.
+(Synopsys IP blocks).
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(i.e. 7xxx/5xxx SoCs) and it's known working on other platforms i.e. ARM SPEAr.
+(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XILINX XC2V3000
+FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing the first code
-implementation.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
+Universal version 4.0 have been used for developing this driver.
+
+This driver supports both the platform bus and PCI.
 
 Please, for more information also visit: www.stlinux.com
 
@@ -277,5 +279,5 @@ In fact, these can generate a huge amount of debug messages.
 
 6) TODO:
  o XGMAC is not supported.
- o Review the timer optimisation code to use an embedded device that will be
-  available in new chip generations.
+ o Add the EEE - Energy Efficient Ethernet
+ o Add the PTP - precision time protocol
diff --git a/Documentation/networking/team.txt b/Documentation/networking/team.txt
new file mode 100644 (file)
index 0000000..5a01368
--- /dev/null
@@ -0,0 +1,2 @@
+Team devices are driven from userspace via libteam library which is here:
+       https://github.com/jpirko/libteam
index 646a89e0c07d50c682912ef0200dc087363cd37f..3139fb505dcec97cb609e02326110e8b5725a43a 100644 (file)
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
 Subsystem-Level Methods
 -----------------------
 The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class.  They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class.  They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on.  Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
 
 /sys/devices/.../power/wakeup files
 -----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state).  These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state).  These fields are initialized by bus or device driver code using
 device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
 include/linux/pm_wakeup.h.
 
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
 physically support wakeup events.  The device_set_wakeup_capable() routine
-affects this flag.  The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism.  device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value.  The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool.  It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag.  The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device.  This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
 
 Whether or not a device is capable of issuing wakeup events is a hardware
 matter, and the kernel is responsible for keeping track of it.  By contrast,
 whether or not a wakeup-capable device should issue wakeup events is a policy
 decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file.  User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively.  This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable().  Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file.  User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup.  This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable().  Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool.  It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
 This information is used by subsystems, like the PCI bus type code, to see
 whether or not to enable the devices' wakeup mechanisms.  If device wakeup
 mechanisms are enabled or disabled directly by drivers, they also should use
 device_may_wakeup() to decide what to do during a system sleep transition.
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
-
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
+
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism.  Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state.  Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design.  On
+some systems it is impossible to trigger them from system sleep states.  In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
 
 /sys/devices/.../power/control files
 ------------------------------------
@@ -249,20 +266,31 @@ for every device before the next phase begins.  Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks.  The
 various phases always run after tasks have been frozen and before they are
 unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
-
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm).  These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used.  Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm).  Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+
+All phases use PM domain, bus, type, or class callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
+PM domain callbacks always take precedence over bus, type and class callbacks,
+while type callbacks take precedence over bus and class callbacks, and class
+callbacks take precedence over bus callbacks.  To be precise, the following
+rules are used to determine which callback to execute in the given phase:
+
+    1. If dev->pm_domain is present, the PM core will attempt to execute the
+       callback included in dev->pm_domain->ops.  If that callback is not
+       present, no action will be carried out for the given device.
+
+    2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+       included in dev->type->pm will be executed.
+
+    3. Otherwise, if both dev->class and dev->class->pm are present, the
+       callback included in dev->class->pm will be executed.
+
+    4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+       included in dev->bus->pm will be executed.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
 
 These callbacks may in turn invoke device- or driver-specific methods stored in
 dev->driver->pm, but they don't have to.
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
 
        After the prepare callback method returns, no new children may be
        registered below the device.  The method may also prepare the device or
-       driver in some way for the upcoming system power transition (for
-       example, by allocating additional memory required for this purpose), but
-       it should not put the device into a low-power state.
+       driver in some way for the upcoming system power transition, but it
+       should not put the device into a low-power state.
 
     2. The suspend methods should quiesce the device to stop it from performing
        I/O.  They also may save the device registers and put it into the
index 5336149f831ba47d6215ccc1d9f9d3f66df370df..c2ae8bf77d46d6fbd12490a6a9aacc14c11c4931 100644 (file)
@@ -44,25 +44,33 @@ struct dev_pm_ops {
 };
 
 The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+  1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+     is present.
+
+  2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+  3. Device class of the device, if both dev->class and dev->class->pm are
+     present.
+
+  4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type.  Moreover, the high-priority one will always take precedence over
+a low-priority one.  The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
 enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
+to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+->runtime_idle() callbacks may be invoked in atomic context with interrupts
+disabled for a given device.  This implies that the callback routines in
+question must not block or sleep, but it also means that the synchronous helper
+functions listed at the end of Section 4 may be used for that device within an
+interrupt handler or generally in an atomic context.
 
 The subsystem-level suspend callback is _entirely_ _responsible_ for handling
 the suspend of the device as appropriate, which may, but need not include
index 079cb3df62cf6ff9908bebe493344d1cfdd99846..41c8378c0b2fb9ee86d3a781e8aaff6cad33ff9a 100644 (file)
 
        struct serial_rs485 rs485conf;
 
-       /* Set RS485 mode: */
+       /* Enable RS485 mode: */
        rs485conf.flags |= SER_RS485_ENABLED;
 
+       /* Set logical level for RTS pin equal to 1 when sending: */
+       rs485conf.flags |= SER_RS485_RTS_ON_SEND;
+       /* or, set logical level for RTS pin equal to 0 when sending: */
+       rs485conf.flags &= ~(SER_RS485_RTS_ON_SEND);
+
+       /* Set logical level for RTS pin equal to 1 after sending: */
+       rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
+       /* or, set logical level for RTS pin equal to 0 after sending: */
+       rs485conf.flags &= ~(SER_RS485_RTS_AFTER_SEND);
+
        /* Set rts delay before send, if needed: */
-       rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND;
        rs485conf.delay_rts_before_send = ...;
 
        /* Set rts delay after send, if needed: */
-       rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
        rs485conf.delay_rts_after_send = ...;
 
        /* Set this flag if you want to receive data even whilst sending data */
index 03e2771ddeef53545357cc4bfbde29a378f9206d..91fee3b45fb80f75ac406dfbba093f1f49a418c1 100644 (file)
@@ -579,7 +579,7 @@ Development Tree
 ~~~~~~~~~~~~~~~~
 The latest development codes for HD-audio are found on sound git tree:
 
-- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git
+- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
 
 The master branch or for-next branches can be used as the main
 development branches in general while the HD-audio specific patches
@@ -594,7 +594,7 @@ is, installed via the usual spells: configure, make and make
 install(-modules).  See INSTALL in the package.  The snapshot tarballs
 are found at:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/snapshot/
+- ftp://ftp.suse.com/pub/people/tiwai/snapshot/
 
 
 Sending a Bug Report
@@ -696,7 +696,7 @@ via hda-verb won't change the mixer value.
 
 The hda-verb program is found in the ftp directory:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/
+- ftp://ftp.suse.com/pub/people/tiwai/misc/
 
 Also a git repository is available:
 
@@ -764,7 +764,7 @@ operation, the jack plugging simulation, etc.
 
 The package is found in:
 
-- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/
+- ftp://ftp.suse.com/pub/people/tiwai/misc/
 
 A git repository is available:
 
index 3e2ec9cbf3976d0d21c6ee90d7fe075a210a33eb..d50c14df34112ed2095942062bcaab90d90697bd 100644 (file)
@@ -50,8 +50,7 @@ Machine DAI Configuration
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
 also be used to set up the DAI system clock and for any machine related DAI
 initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
 
 struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
 
@@ -83,8 +82,7 @@ Machine Power Map
 The machine driver can optionally extend the codec power map and to become an
 audio power map of the audio subsystem. This allows for automatic power up/down
 of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
 
 
 Machine Controls
index 37a02ce5484176670fde42a5bb915427316587f3..f0ffc27d4c0ac9d52efa62ecc0f775fd877b7387 100644 (file)
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
 [SourceDisksFiles]
 [SourceDisksNames]
 [DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 [DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 
 ;------------------------------------------------------------------------------
index 7945b0bd35e2ad50d7561ffa88a428c21175b50b..e2a4b5287361d25c0800954cbc79eccea88291d3 100644 (file)
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support.  Instead it is reported via
+
+  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device.  Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment.  The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
index 071a9967434750dffe00e68904832f5e374a6a6c..0cc83fc1d8b208d76026348848868ee11f2c292b 100644 (file)
@@ -511,8 +511,8 @@ M:  Joerg Roedel <joerg.roedel@amd.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
 S:     Supported
-F:     arch/x86/kernel/amd_iommu*.c
-F:     arch/x86/include/asm/amd_iommu*.h
+F:     drivers/iommu/amd_iommu*.[ch]
+F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -789,6 +789,7 @@ L:  linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.pengutronix.de/git/imx/linux-2.6.git
 F:     arch/arm/mach-mx*/
+F:     arch/arm/mach-imx/
 F:     arch/arm/plat-mxc/
 
 ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
 T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
 F:     arch/arm/mach-imx/*imx6*
 
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M:     Shawn Guo <shawn.guo@linaro.org>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F:     arch/arm/mach-mxs/
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1046,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
 M:     Ben Dooks <ben-linux@fluff.org>
 M:     Kukjin Kim <kgene.kim@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     arch/arm/mach-s3c24*/
+F:     arch/arm/mach-s3c64xx/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2440/
-F:     arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c64xx/
+F:     drivers/spi/spi-s3c*
+F:     sound/soc/samsung/*
 
 ARM/S5P EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -1707,11 +1698,9 @@ F:       arch/x86/include/asm/tce.h
 
 CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
-M:     Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M:     Urs Thuermann <urs.thuermann@volkswagen.de>
 L:     linux-can@vger.kernel.org
-L:     netdev@vger.kernel.org
-W:     http://developer.berlios.de/projects/socketcan/
+W:     http://gitorious.org/linux-can
+T:     git git://gitorious.org/linux-can/linux-can-next.git
 S:     Maintained
 F:     net/can/
 F:     include/linux/can.h
@@ -1722,9 +1711,10 @@ F:       include/linux/can/gw.h
 
 CAN NETWORK DRIVERS
 M:     Wolfgang Grandegger <wg@grandegger.com>
+M:     Marc Kleine-Budde <mkl@pengutronix.de>
 L:     linux-can@vger.kernel.org
-L:     netdev@vger.kernel.org
-W:     http://developer.berlios.de/projects/socketcan/
+W:     http://gitorious.org/linux-can
+T:     git git://gitorious.org/linux-can/linux-can-next.git
 S:     Maintained
 F:     drivers/net/can/
 F:     include/linux/can/dev.h
@@ -1789,6 +1779,14 @@ F:       include/net/cfg80211.h
 F:     net/wireless/*
 X:     net/wireless/wext*
 
+CHAR and MISC DRIVERS
+M:     Arnd Bergmann <arnd@arndb.de>
+M:     Greg Kroah-Hartman <greg@kroah.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+S:     Maintained
+F:     drivers/char/*
+F:     drivers/misc/*
+
 CHECKPATCH
 M:     Andy Whitcroft <apw@canonical.com>
 S:     Supported
@@ -1927,9 +1925,11 @@ S:       Maintained
 F:     drivers/connector/
 
 CONTROL GROUPS (CGROUPS)
-M:     Paul Menage <paul@paulmenage.org>
+M:     Tejun Heo <tj@kernel.org>
 M:     Li Zefan <lizf@cn.fujitsu.com>
 L:     containers@lists.linux-foundation.org
+L:     cgroups@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:     Maintained
 F:     include/linux/cgroup*
 F:     kernel/cgroup*
@@ -2584,7 +2584,7 @@ S:        Maintained
 F:     drivers/net/ethernet/i825xx/eexpress.*
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <shemminger@linux-foundation.org>
+M:     Stephen Hemminger <shemminger@vyatta.com>
 L:     bridge@lists.linux-foundation.org
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -2699,7 +2699,7 @@ FIREWIRE SUBSYSTEM
 M:     Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:     linux1394-devel@lists.sourceforge.net
 W:     http://ieee1394.wiki.kernel.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
 S:     Maintained
 F:     drivers/firewire/
 F:     include/linux/firewire*.h
@@ -3100,6 +3100,7 @@ F:        include/linux/hid*
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     Documentation/timers/
 F:     kernel/hrtimer.c
@@ -3609,7 +3610,7 @@ F:        net/irda/
 IRQ SUBSYSTEM
 M:     Thomas Gleixner <tglx@linutronix.de>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
 
 ISAPNP
@@ -3718,7 +3719,7 @@ F:        fs/jbd2/
 F:     include/linux/jbd2.h
 
 JSM Neo PCI based serial card
-M:     Breno Leitao <leitao@linux.vnet.ibm.com>
+M:     Lucas Tavares <lucaskt@linux.vnet.ibm.com>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial/jsm/
@@ -4097,7 +4098,7 @@ F:        drivers/hwmon/lm90.c
 LOCKDEP AND LOCKSTAT
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:     Maintained
 F:     Documentation/lockdep*.txt
 F:     Documentation/lockstat.txt
@@ -4279,7 +4280,9 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     Documentation/dvb/
 F:     Documentation/video4linux/
+F:     Documentation/DocBook/media/
 F:     drivers/media/
+F:     drivers/staging/media/
 F:     include/media/
 F:     include/linux/dvb/
 F:     include/linux/videodev*.h
@@ -4301,9 +4304,11 @@ F:       include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Michal Hocko <mhocko@suse.cz>
 M:     Balbir Singh <bsingharora@gmail.com>
-M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/memcontrol.c
@@ -4337,7 +4342,7 @@ MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
 L:     linux-mips@linux-mips.org
 W:     http://www.linux-mips.org/
-T:     git git://git.linux-mips.org/pub/scm/linux.git
+T:     git git://git.linux-mips.org/pub/scm/ralf/linux.git
 Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
 S:     Supported
 F:     Documentation/mips/
@@ -4470,7 +4475,7 @@ S:        Supported
 F:     drivers/infiniband/hw/nes/
 
 NETEM NETWORK EMULATOR
-M:     Stephen Hemminger <shemminger@linux-foundation.org>
+M:     Stephen Hemminger <shemminger@vyatta.com>
 L:     netem@lists.linux-foundation.org
 S:     Maintained
 F:     net/sched/sch_netem.c
@@ -4849,6 +4854,14 @@ S:       Maintained
 T:     git git://openrisc.net/~jonas/linux
 F:     arch/openrisc
 
+OPENVSWITCH
+M:     Jesse Gross <jesse@nicira.com>
+L:     dev@openvswitch.org
+W:     http://openvswitch.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+S:     Maintained
+F:     net/openvswitch/
+
 OPL4 DRIVER
 M:     Clemens Ladisch <clemens@ladisch.de>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -4947,7 +4960,7 @@ F:        drivers/char/ppdev.c
 F:     include/linux/ppdev.h
 
 PARAVIRT_OPS INTERFACE
-M:     Jeremy Fitzhardinge <jeremy@xensource.com>
+M:     Jeremy Fitzhardinge <jeremy@goop.org>
 M:     Chris Wright <chrisw@sous-sol.org>
 M:     Alok Kataria <akataria@vmware.com>
 M:     Rusty Russell <rusty@rustcorp.com.au>
@@ -5083,6 +5096,7 @@ M:        Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
 F:     kernel/events/*
 F:     include/linux/perf_event.h
@@ -5162,6 +5176,7 @@ F:        drivers/scsi/pm8001/
 
 POSIX CLOCKS and TIMERS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     fs/timerfd.c
 F:     include/linux/timer*
@@ -5366,6 +5381,7 @@ S:        Supported
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -5656,7 +5672,6 @@ F:        drivers/media/video/*7146*
 F:     include/media/*7146*
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Jassi Brar <jassisinghbrar@gmail.com>
 M:     Sangbeom Kim <sbkim73@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -5678,6 +5693,7 @@ F:        drivers/dma/dw_dmac.c
 TIMEKEEPING, NTP
 M:     John Stultz <johnstul@us.ibm.com>
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     include/linux/clocksource.h
 F:     include/linux/time.h
@@ -5702,6 +5718,7 @@ F:        drivers/watchdog/sc1200wdt.c
 SCHEDULER
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Peter Zijlstra <peterz@infradead.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
 F:     kernel/sched*
 F:     include/linux/sched.h
@@ -5884,7 +5901,6 @@ F:        drivers/net/ethernet/emulex/benet/
 
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:     Steve Hodgson <shodgson@solarflare.com>
 M:     Ben Hutchings <bhutchings@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -5985,7 +6001,7 @@ S:        Maintained
 F:     drivers/usb/misc/sisusbvga/
 
 SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-M:     Stephen Hemminger <shemminger@linux-foundation.org>
+M:     Stephen Hemminger <shemminger@vyatta.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/marvell/sk*
@@ -6492,6 +6508,13 @@ W:       http://tcp-lp-mod.sourceforge.net/
 S:     Maintained
 F:     net/ipv4/tcp_lp.c
 
+TEAM DRIVER
+M:     Jiri Pirko <jpirko@redhat.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/team/
+F:     include/linux/if_team.h
+
 TEGRA SUPPORT
 M:     Colin Cross <ccross@android.com>
 M:     Olof Johansson <olof@lixom.net>
@@ -6629,7 +6652,7 @@ TRACING
 M:     Steven Rostedt <rostedt@goodmis.org>
 M:     Frederic Weisbecker <fweisbec@gmail.com>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Maintained
 F:     Documentation/trace/ftrace.txt
 F:     arch/*/*/*/ftrace.h
@@ -7379,7 +7402,7 @@ M:        Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
 F:     Documentation/x86/
 F:     arch/x86/
@@ -7399,8 +7422,8 @@ S:        Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
 
 XEN HYPERVISOR INTERFACE
-M:     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Jeremy Fitzhardinge <jeremy@goop.org>
 L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:     virtualization@lists.linux-foundation.org
 S:     Supported
@@ -7433,7 +7456,8 @@ F:        drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:     Silicon Graphics Inc
-M:     Alex Elder <aelder@sgi.com>
+M:     Ben Myers <bpm@sgi.com>
+M:     Alex Elder <elder@kernel.org>
 M:     xfs-masters@oss.sgi.com
 L:     xfs@oss.sgi.com
 W:     http://oss.sgi.com/projects/xfs
index dab8610c4d6f63a2cc589764596e07229dd7c761..adddd11c3b3b8a2827dc4f591b573b1fde5768a9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index 44789eff983f444c22cd967cbe418f7e50159358..b259c7c644e357a9999c3767fd34431d8c39ae8a 100644 (file)
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
          be avoided when possible.
 
 config PHYS_OFFSET
-       hex "Physical address of main memory"
+       hex "Physical address of main memory" if MMU
        depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+       default DRAM_BASE if !MMU
        help
          Please provide the physical address corresponding to the
          location of main memory in your system.
@@ -1231,7 +1232,7 @@ config ARM_ERRATA_742231
          capabilities of the processor.
 
 config PL310_ERRATA_588369
-       bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+       bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0
        help
           The PL310 L2 cache controller implements three types of Clean &
@@ -1245,7 +1246,7 @@ config PL310_ERRATA_588369
 
 config ARM_ERRATA_720789
        bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 720789 Cortex-A9 (prior to
          r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1256,7 +1257,7 @@ config ARM_ERRATA_720789
          entries regardless of the ASID.
 
 config PL310_ERRATA_727915
-       bool "Background Clean & Invalidate by Way operation can cause data corruption"
+       bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
        depends on CACHE_L2X0
        help
          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1281,7 +1282,7 @@ config ARM_ERRATA_743622
 
 config ARM_ERRATA_751472
        bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 751472 Cortex-A9 (prior
          to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
@@ -1289,8 +1290,8 @@ config ARM_ERRATA_751472
          operation is received by a CPU before the ICIALLUIS has completed,
          potentially leading to corrupted entries in the cache or TLB.
 
-config ARM_ERRATA_753970
-       bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+       bool "PL310 errata: cache sync operation may be faulty"
        depends on CACHE_PL310
        help
          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1353,18 @@ config ARM_ERRATA_764369
          relevant cache maintenance functions and sets a specific bit
          in the diagnostic control register of the SCU.
 
+config PL310_ERRATA_769419
+       bool "PL310 errata: no automatic Store Buffer drain"
+       depends on CACHE_L2X0
+       help
+         On revisions of the PL310 prior to r3p2, the Store Buffer does
+         not automatically drain. This can cause normal, non-cacheable
+         writes to be retained when the memory system is idle, leading
+         to suboptimal I/O performance for drivers using coherent DMA.
+         This option adds a write barrier to the cpu_idle loop so that,
+         on systems with an outer cache, the store buffer is drained
+         explicitly.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
index 176062ac7f07305014357372c42285ff07a09c68..5df26a9976a26c10ddfcf0f788302dd2caeb8769 100644 (file)
@@ -65,6 +65,8 @@ $(obj)/%.dtb: $(src)/dts/%.dts
 
 $(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
 
+clean-files := *.dtb
+
 quiet_cmd_uimage = UIMAGE  $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
                   -C none -a $(LOADADDR) -e $(STARTADDR) \
index 0e6ae470c94f26589c0721a059dfaded9b2f30b1..410a546060a2eecf82859d76c528223e8796f9f4 100644 (file)
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);
 
-       cpu_pm_register_notifier(&gic_notifier_block);
+       if (gic == &gic_data[0])
+               cpu_pm_register_notifier(&gic_notifier_block);
 }
 #else
 static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
         * For primary GICs, skip over SGIs.
         * For secondary GICs, skip over PPIs, too.
         */
+       domain->hwirq_base = 32;
        if (gic_nr == 0) {
                gic_cpu_base_addr = cpu_base;
-               domain->hwirq_base = 16;
-               if (irq_start > 0)
-                       irq_start = (irq_start & ~31) + 16;
-       } else
-               domain->hwirq_base = 32;
+
+               if ((irq_start & 31) > 0) {
+                       domain->hwirq_base = 16;
+                       if (irq_start != -1)
+                               irq_start = (irq_start & ~31) + 16;
+               }
+       }
 
        /*
         * Find out how many interrupts are supported.
index 7129cfbdacd6887920cf28994625be7b35f65503..8d8df744f7a5eb72e02bc65bf85029c2b9f60a63 100644 (file)
  */
 #define MCODE_BUFF_PER_REQ     256
 
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
-                               _emit_END(0, (req)->mc_cpu); \
-                               (req)->mc_len = 0; \
-                       } while (0)
-
 /* If the _pl330_req is available to the client */
 #define IS_FREE(req)   (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
 
@@ -301,8 +290,10 @@ struct pl330_thread {
        struct pl330_dmac *dmac;
        /* Only two at a time */
        struct _pl330_req req[2];
-       /* Index of the last submitted request */
+       /* Index of the last enqueued request */
        unsigned lstenq;
+       /* Index of the last submitted request or -1 if the DMA is stopped */
+       int req_running;
 };
 
 enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
        writel(0, regs + DBGCMD);
 }
 
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+       struct _pl330_req *req = &thrd->req[idx];
+
+       _emit_END(0, req->mc_cpu);
+       req->mc_len = 0;
+
+       thrd->req_running = -1;
+}
+
 static inline u32 _state(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
        }
 }
 
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
-               struct _pl330_req *req)
-{
-       void __iomem *regs = thrd->dmac->pinfo->base;
-       u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
-       if (IS_FREE(req))
-               return false;
-
-       return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
-       if (_req_active(thrd, &thrd->req[0]))
-               return 1; /* First req active */
-
-       if (_req_active(thrd, &thrd->req[1]))
-               return 2; /* Second req active */
-
-       return 0;
-}
-
 static void _stop(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
        struct _arg_GO go;
        unsigned ns;
        u8 insn[6] = {0, 0, 0, 0, 0, 0};
+       int idx;
 
        /* Return if already ACTIVE */
        if (_state(thrd) != PL330_STATE_STOPPED)
                return true;
 
-       if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
-               req = &thrd->req[1 - thrd->lstenq];
-       else if (!IS_FREE(&thrd->req[thrd->lstenq]))
-               req = &thrd->req[thrd->lstenq];
-       else
-               req = NULL;
+       idx = 1 - thrd->lstenq;
+       if (!IS_FREE(&thrd->req[idx]))
+               req = &thrd->req[idx];
+       else {
+               idx = thrd->lstenq;
+               if (!IS_FREE(&thrd->req[idx]))
+                       req = &thrd->req[idx];
+               else
+                       req = NULL;
+       }
 
        /* Return if no request */
        if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
        /* Only manager can execute GO */
        _execute_DBGINSN(thrd, insn, true);
 
+       thrd->req_running = idx;
+
        return true;
 }
 
@@ -1211,8 +1200,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
        ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
        ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
 
-       ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
-       ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+       ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+       ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
 
        ccr |= (rqc->swap << CC_SWAP_SHFT);
 
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
 
                        thrd->req[0].r = NULL;
                        thrd->req[1].r = NULL;
-                       MARK_FREE(&thrd->req[0]);
-                       MARK_FREE(&thrd->req[1]);
+                       mark_free(thrd, 0);
+                       mark_free(thrd, 1);
 
                        /* Clear the reset flag */
                        pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
 
                        thrd = &pl330->channels[id];
 
-                       active = _thrd_active(thrd);
-                       if (!active) /* Aborted */
+                       active = thrd->req_running;
+                       if (active == -1) /* Aborted */
                                continue;
 
-                       active -= 1;
-
                        rqdone = &thrd->req[active];
-                       MARK_FREE(rqdone);
+                       mark_free(thrd, active);
 
                        /* Get going again ASAP */
                        _start(thrd);
@@ -1509,7 +1496,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        unsigned long flags;
-       int ret = 0, active;
+       int ret = 0, active = thrd->req_running;
 
        if (!thrd || thrd->free || thrd->dmac->state == DYING)
                return -EINVAL;
@@ -1525,28 +1512,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
 
                thrd->req[0].r = NULL;
                thrd->req[1].r = NULL;
-               MARK_FREE(&thrd->req[0]);
-               MARK_FREE(&thrd->req[1]);
+               mark_free(thrd, 0);
+               mark_free(thrd, 1);
                break;
 
        case PL330_OP_ABORT:
-               active = _thrd_active(thrd);
-
                /* Make sure the channel is stopped */
                _stop(thrd);
 
                /* ABORT is only for the active req */
-               if (!active)
+               if (active == -1)
                        break;
 
-               active--;
-
                thrd->req[active].r = NULL;
-               MARK_FREE(&thrd->req[active]);
+               mark_free(thrd, active);
 
                /* Start the next */
        case PL330_OP_START:
-               if (!_thrd_active(thrd) && !_start(thrd))
+               if ((active == -1) && !_start(thrd))
                        ret = -EIO;
                break;
 
@@ -1587,14 +1570,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
        else
                pstatus->faulting = false;
 
-       active = _thrd_active(thrd);
+       active = thrd->req_running;
 
-       if (!active) {
+       if (active == -1) {
                /* Indicate that the thread is not running */
                pstatus->top_req = NULL;
                pstatus->wait_req = NULL;
        } else {
-               active--;
                pstatus->top_req = thrd->req[active].r;
                pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
                                        ? thrd->req[1 - active].r : NULL;
@@ -1623,6 +1605,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
        return -1;
 }
 
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+       return pi->pcfg.irq_ns & (1 << i);
+}
+
 /* Upon success, returns IdentityToken for the
  * allocated channel, NULL otherwise.
  */
@@ -1647,15 +1634,16 @@ void *pl330_request_channel(const struct pl330_info *pi)
 
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
-               if (thrd->free) {
+               if ((thrd->free) && (!_manager_ns(thrd) ||
+                                       _chan_ns(pi, i))) {
                        thrd->ev = _alloc_event(thrd);
                        if (thrd->ev >= 0) {
                                thrd->free = false;
                                thrd->lstenq = 1;
                                thrd->req[0].r = NULL;
-                               MARK_FREE(&thrd->req[0]);
+                               mark_free(thrd, 0);
                                thrd->req[1].r = NULL;
-                               MARK_FREE(&thrd->req[1]);
+                               mark_free(thrd, 1);
                                break;
                        }
                }
@@ -1761,14 +1749,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
        thrd->req[0].mc_bus = pl330->mcode_bus
                                + (thrd->id * pi->mcbufsz);
        thrd->req[0].r = NULL;
-       MARK_FREE(&thrd->req[0]);
+       mark_free(thrd, 0);
 
        thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
                                + pi->mcbufsz / 2;
        thrd->req[1].mc_bus = thrd->req[0].mc_bus
                                + pi->mcbufsz / 2;
        thrd->req[1].r = NULL;
-       MARK_FREE(&thrd->req[1]);
+       mark_free(thrd, 1);
 }
 
 static int dmac_alloc_threads(struct pl330_dmac *pl330)
similarity index 93%
rename from arch/arm/configs/at91cap9adk_defconfig
rename to arch/arm/configs/at91cap9_defconfig
index ffb1edd933633b6f5cc661df0347ba2f4416b9fc..8826eb218e73f861a1dc36a1dc875c4430b28a50 100644 (file)
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT91SAM9=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
index 38cb7c98542623a92cc7791a89f701e9fe5adf48..bbe4e1a1f5d86f81464e393e2fd2ab228e34542c 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
 CONFIG_BRIDGE=m
 CONFIG_VLAN_8021Q=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_AFS_PARTS=y
 CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_LEGACY=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
+CONFIG_ARM_AT91_ETHER=y
 CONFIG_PHYLIB=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_SMSC_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=y
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_USB_CATC=m
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
 CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LEGACY_PTY_COUNT=32
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_CRYPTO_PCBC=y
 CONFIG_CRYPTO_SHA1=y
similarity index 86%
rename from arch/arm/configs/at91sam9260ek_defconfig
rename to arch/arm/configs/at91sam9260_defconfig
index f8a9226413bfe59735be5a6c9538a3c200bc5b0a..505b3765f87ebfb269184a11b9b096dc4fff8fea 100644 (file)
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_AT91=y
 CONFIG_ARCH_AT91SAM9260=y
+CONFIG_ARCH_AT91SAM9260_SAM9XE=y
 CONFIG_MACH_AT91SAM9260EK=y
+CONFIG_MACH_CAM60=y
+CONFIG_MACH_SAM9_L9260=y
+CONFIG_MACH_AFEB9260=y
+CONFIG_MACH_USB_A9260=y
+CONFIG_MACH_QIL_A9260=y
+CONFIG_MACH_CPU9260=y
+CONFIG_MACH_FLEXIBITY=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 # CONFIG_ARM_THUMB is not set
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
 CONFIG_FPE_NWFPE=y
 CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 CONFIG_MACB=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT91SAM9=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_CRAMFS=y
similarity index 90%
rename from arch/arm/configs/at91sam9g20ek_defconfig
rename to arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d792973042ec81faf2f78433e416a2752d..9123568d9a8db424f34fceaa66aa69d5fb4374fe 100644 (file)
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
 CONFIG_ARCH_AT91SAM9G20=y
 CONFIG_MACH_AT91SAM9G20EK=y
 CONFIG_MACH_AT91SAM9G20EK_2MMC=y
+CONFIG_MACH_CPU9G20=y
+CONFIG_MACH_ACMENETUSFOXG20=y
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_MACH_GSIA18S=y
+CONFIG_MACH_USB_A9G20=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 # CONFIG_ARM_THUMB is not set
 CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
 CONFIG_LEDS_CPU=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_HW_RANDOM=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=y
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_SEQUENCER_OSS=y
 # CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_AT73C213=y
 CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT91SAM9=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index c5876d244f4b2d667db3d4753bdc1bdab1203064..606d48f3b8f81c10370b718d7b2b3475818b9a03 100644 (file)
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_AT91=y
 CONFIG_ARCH_AT91SAM9G45=y
 CONFIG_MACH_AT91SAM9M10G45EK=y
+CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 CONFIG_AT91_SLOW_CLOCK=y
 CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_MII=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
 CONFIG_LIBERTAS_THINFIRM=m
 CONFIG_LIBERTAS_THINFIRM_USB=m
 CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 # CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
 CONFIG_FB_UDL=m
similarity index 94%
rename from arch/arm/configs/at91sam9rlek_defconfig
rename to arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03fc2992d6f02e022aed4ccd7a021e29..ad562ee64209cd08e57f9b31160c089b810d7c11 100644 (file)
@@ -23,8 +23,6 @@ CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=4
 CONFIG_BLK_DEV_RAM_SIZE=24576
-CONFIG_ATMEL_SSC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_MMC=y
 CONFIG_MMC_AT91=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT91SAM9=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 227a477346edb44a2a83ba49a0f9b7cd2b165ec0..d95763d5f0d83df6543364e3ea648c2ad88994bf 100644 (file)
@@ -287,7 +287,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index 176ec22af0342f215b59a2778144d69ef0f4c806..fd996bb13022879dee93c308c8e8ad154918ec20 100644 (file)
@@ -263,7 +263,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index 11a4192197c8fbaef84a10a53ce24fb497ae3947..cf497ce41dfe725bf5f4549faab3bc4c82a5728c 100644 (file)
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
 CONFIG_ARCH_IMX_V4_V5=y
 CONFIG_ARCH_MX1ADS=y
 CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
 CONFIG_MACH_MX21ADS=y
 CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
 CONFIG_MACH_MX27ADS=y
 CONFIG_MACH_PCM038=y
 CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@ CONFIG_MTD_CFI_GEOMETRY=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
 CONFIG_MTD_UBI=y
 CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
 CONFIG_DM9000=y
+CONFIG_SMC91X=y
 CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@ CONFIG_MMC=y
 CONFIG_MMC_MXC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_MC13783=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
index a88e64d4e9a5862c28160c9d1b7d3cd01edf0550..443675d317e6de326c576caf47ae9ff179a0814c 100644 (file)
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_USB_GADGETFS=m
index 7b63462b349d7de9965b13bec7c1c85e57c25fcd..945a34f2a34dbd9e711ac440500d1e17cfad2ac0 100644 (file)
@@ -48,13 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
index 4a5a12681be2038c908318ffc953c3ad7a9bd431..374000ec4e4e9a66bbf07304abd75dced85a0b66 100644 (file)
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_U300=y
 CONFIG_MACH_U300=y
 CONFIG_MACH_U300_BS335=y
-CONFIG_MACH_U300_DUAL_RAM=y
-CONFIG_U300_DEBUG=y
 CONFIG_MACH_U300_SPIDUMMY=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
 CONFIG_CPU_IDLE=y
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 # CONFIG_SUSPEND is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 # CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_CRC32 is not set
index 97d31a4663daf0c6186cd948a05abf35687686d3..2d7b6e7b72713cba72df0349462c29dbe38a67ba 100644 (file)
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_U8500=y
 CONFIG_UX500_SOC_DB5500=y
 CONFIG_UX500_SOC_DB8500=y
-CONFIG_MACH_U8500=y
+CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
 CONFIG_MACH_U5500=y
 CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_VFP=y
 CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
 CONFIG_AB8500_PWM=y
 CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_WLAN is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
-# CONFIG_HWMON is not set
 CONFIG_MFD_STMPE=y
 CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
 CONFIG_AB8500_CORE=y
 CONFIG_REGULATOR_AB8500=y
 # CONFIG_HID_SUPPORT is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_GADGET=y
 CONFIG_AB8500_USB=y
 CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
 CONFIG_STE_DMA40=y
 CONFIG_STAGING=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
index 59577ad3f4efdfda65e65753054673a32614e80f..547a3c1e59dbcd88ea9da77505691110508f9daf 100644 (file)
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_MCT_U232=m
 CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
index 1db1143a94838cb92419040c40009f17d1ee2f97..7df239bcdf2745b6a3d20e5a90ed3bac27d73ecc 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __ASM_ARM_HARDWARE_L2X0_H
 #define __ASM_ARM_HARDWARE_L2X0_H
 
+#include <linux/errno.h>
+
 #define L2X0_CACHE_ID                  0x000
 #define L2X0_CACHE_TYPE                        0x004
 #define L2X0_CTRL                      0x100
index 7d19425dd496a083eb98557bafe2980843bc9fa3..2b0efc3104ac6f73846fb89cdf0761c400676540 100644 (file)
@@ -13,6 +13,7 @@
 struct tag;
 struct meminfo;
 struct sys_timer;
+struct pt_regs;
 
 struct machine_desc {
        unsigned int            nr;             /* architecture number  */
index 71d99b83cdb980178aac275e487c2db081c041fc..0bda22c094a6dd1b70100444d6e29fa1a41688ef 100644 (file)
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
 extern void
 release_pmu(enum arm_pmu_type type);
 
-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
 #else /* CONFIG_CPU_HAS_PMU */
 
 #include <linux/err.h>
index a7e457ed27c31e1185ebe3a0eaa745d88c35029d..58b8b84adcd2cf5f295e6869b68350f9dcadc798 100644 (file)
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else
 
index c60a2944f95b82a0cc9d5bfea32d88c65ecb202f..4a1123783806b79ef559ed51a093353feb67070e 100644 (file)
 #define __NR_syncfs                    (__NR_SYSCALL_BASE+373)
 #define __NR_sendmmsg                  (__NR_SYSCALL_BASE+374)
 #define __NR_setns                     (__NR_SYSCALL_BASE+375)
+#define __NR_process_vm_readv          (__NR_SYSCALL_BASE+376)
+#define __NR_process_vm_writev         (__NR_SYSCALL_BASE+377)
 
 /*
  * The following SWIs are ARM private.
index a5edf421005cce0d043cbb75be394253127a2582..d1c3f3a71c9454dd665b48a0cfc51bc954576a05 100644 (file)
@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 
 struct unwind_idx {
-       unsigned long addr;
+       unsigned long addr_offset;
        unsigned long insn;
 };
 
 struct unwind_table {
        struct list_head list;
-       struct unwind_idx *start;
-       struct unwind_idx *stop;
+       const struct unwind_idx *start;
+       const struct unwind_idx *origin;
+       const struct unwind_idx *stop;
        unsigned long begin_addr;
        unsigned long end_addr;
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-       return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
index 9943e9e74a1bda0b17bc6e1ee93ca3ab2b80f970..463ff4a0ec8acaa69372b8abd40a39260ee91736 100644 (file)
                CALL(sys_syncfs)
                CALL(sys_sendmmsg)
 /* 375 */      CALL(sys_setns)
+               CALL(sys_process_vm_readv)
+               CALL(sys_process_vm_writev)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 9ad50c4208aebf5aaf7ee245444da5789935b269..b145f16c91bc786db82fcd3cd66ccdee7b740aa4 100644 (file)
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
        .popsection
        .pushsection __ex_table,"a"
        .long   1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
        .long   2b, 4b
        .long   3b, 4b
 #endif
index 566c54c2a1fef28bffea090e89bcec998b73ede6..08c82fd844a8683533216048b54ff2f210729fb6 100644 (file)
@@ -360,7 +360,7 @@ __secondary_data:
  *  r13 = *virtual* address to jump to upon completion
  */
 __enable_mmu:
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
 #else
        bic     r0, r0, #CR_A
index 9fe8910308af922eda3c17ffee908c99652c1a1f..8a30c89da70ec104d4c1499f3a88a1dc4721e6e0 100644 (file)
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
 static const union decode_item arm_cccc_0001_____1001_table[] = {
        /* Synchronization primitives                                   */
 
+#if __LINUX_ARM_ARCH__ < 6
+       /* Deprecated on ARMv6 and may be UNDEFINED on v7               */
        /* SMP/SWPB             cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
        DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
                                                 REGS(NOPC, NOPC, 0, 0, NOPC)),
-
+#endif
        /* LDREX/STREX{,D,B,H}  cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
        /* And unallocated instructions...                              */
        DECODE_END
index fc82de8bdcce1081801cece62fa3d97480d520e1..ba32b393b3f0c514c83799687348d52655bfe1da 100644 (file)
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
 
        TEST_GROUP("Synchronization primitives")
 
-       /*
-        * Use hard coded constants for SWP instructions to avoid warnings
-        * about deprecated instructions.
-        */
-       TEST_RP( ".word 0xe108e097 @ swp        lr, r",7,VAL2,", [r",8,0,"]")
-       TEST_R(  ".word 0x610d0091 @ swpvs      r0, r",1,VAL1,", [sp]")
-       TEST_RP( ".word 0xe10cd09e @ swp        sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+       TEST_RP("swp    lr, r",7,VAL2,", [r",8,0,"]")
+       TEST_R( "swpvs  r0, r",1,VAL1,", [sp]")
+       TEST_RP("swp    sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+       TEST_UNSUPPORTED(".word 0xe108e097 @ swp        lr, r7, [r8]")
+       TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs      r0, r1, [sp]")
+       TEST_UNSUPPORTED(".word 0xe10cd09e @ swp        sp, r14 [r12]")
+#endif
        TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
        TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
        TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
-       TEST_RP( ".word 0xe148e097 @ swpb       lr, r",7,VAL2,", [r",8,0,"]")
-       TEST_R(  ".word 0x614d0091 @ swpvsb     r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+       TEST_RP("swpb   lr, r",7,VAL2,", [r",8,0,"]")
+       TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+       TEST_UNSUPPORTED(".word 0xe148e097 @ swpb       lr, r7, [r8]")
+       TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb     r0, r1, [sp]")
+#endif
        TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
 
        TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
        TEST_RPR(  "strccd      r",8, VAL2,", [r",13,0, ", r",12,48,"]")
        TEST_RPR(  "strd        r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
        TEST_RPR(  "strcsd      r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
-       TEST_RPR(  "strd        r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+       TEST_RPR(  "strd        r",2, VAL1,", [r",5, 24,"], r",4,48,"")
        TEST_RPR(  "strd        r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
        TEST_UNSUPPORTED(".word 0xe1afc0fa      @ strd r12, [pc, r10]!")
 
index 5e726c31c45aef5084a7e7ddf07bb1762ed4c403..5d8b857922220b4be4e9babf6e050c94625a56eb 100644 (file)
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
 DONT_TEST_IN_ITBLOCK(
        TEST_BF_R(  "cbnz       r",0,0, ", 2f")
        TEST_BF_R(  "cbz        r",2,-1,", 2f")
-       TEST_BF_RX( "cbnz       r",4,1, ", 2f",0x20)
-       TEST_BF_RX( "cbz        r",7,0, ", 2f",0x40)
+       TEST_BF_RX( "cbnz       r",4,1, ", 2f", SPACE_0x20)
+       TEST_BF_RX( "cbz        r",7,0, ", 2f", SPACE_0x40)
 )
        TEST_R("sxth    r0, r",7, HH1,"")
        TEST_R("sxth    r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
        TESTCASE_START(code)            \
        TEST_ARG_PTR(13, offset)        \
        TEST_ARG_END("")                \
-       TEST_BRANCH_F(code,0)           \
+       TEST_BRANCH_F(code)             \
        TESTCASE_END
 
        TEST("push      {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
 
        TEST_BF(  "b    2f")
        TEST_BB(  "b    2b")
-       TEST_BF_X("b    2f", 0x400)
-       TEST_BB_X("b    2b", 0x400)
+       TEST_BF_X("b    2f", SPACE_0x400)
+       TEST_BB_X("b    2b", SPACE_0x400)
 
        TEST_GROUP("Testing instructions in IT blocks")
 
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
        TEST_BB("bne.w  2b")
        TEST_BF("bgt.w  2f")
        TEST_BB("blt.w  2b")
-       TEST_BF_X("bpl.w        2f",0x1000)
+       TEST_BF_X("bpl.w        2f", SPACE_0x1000)
 )
 
        TEST_UNSUPPORTED("msr   cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
 
        TEST_BF(  "b.w  2f")
        TEST_BB(  "b.w  2b")
-       TEST_BF_X("b.w  2f", 0x1000)
+       TEST_BF_X("b.w  2f", SPACE_0x1000)
 
        TEST_BF(  "bl.w 2f")
        TEST_BB(  "bl.w 2b")
-       TEST_BB_X("bl.w 2b", 0x1000)
+       TEST_BB_X("bl.w 2b", SPACE_0x1000)
 
        TEST_X( "blx    __dummy_arm_subroutine",
                ".arm                           \n\t"
index 0dc5d77b9356bcd86b4b0c087bf642bc6396e06e..e28a869b1ae4b7be5abfb7d088c3214bfe2caab7 100644 (file)
@@ -149,23 +149,31 @@ struct test_arg_end {
        "1:     "instruction"                           \n\t"   \
        "       nop                                     \n\t"
 
-#define TEST_BRANCH_F(instruction, xtra_dist)                  \
+#define TEST_BRANCH_F(instruction)                             \
        TEST_INSTRUCTION(instruction)                           \
-       ".if "#xtra_dist"                               \n\t"   \
        "       b       99f                             \n\t"   \
-       ".space "#xtra_dist"                            \n\t"   \
-       ".endif                                         \n\t"   \
+       "2:     nop                                     \n\t"
+
+#define TEST_BRANCH_B(instruction)                             \
+       "       b       50f                             \n\t"   \
+       "       b       99f                             \n\t"   \
+       "2:     nop                                     \n\t"   \
+       "       b       99f                             \n\t"   \
+       TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex)                     \
+       TEST_INSTRUCTION(instruction)                           \
+       "       b       99f                             \n\t"   \
+       codex"                                          \n\t"   \
        "       b       99f                             \n\t"   \
        "2:     nop                                     \n\t"
 
-#define TEST_BRANCH_B(instruction, xtra_dist)                  \
+#define TEST_BRANCH_BX(instruction, codex)                     \
        "       b       50f                             \n\t"   \
        "       b       99f                             \n\t"   \
        "2:     nop                                     \n\t"   \
        "       b       99f                             \n\t"   \
-       ".if "#xtra_dist"                               \n\t"   \
-       ".space "#xtra_dist"                            \n\t"   \
-       ".endif                                         \n\t"   \
+       codex"                                          \n\t"   \
        TEST_INSTRUCTION(instruction)
 
 #define TESTCASE_END                                           \
@@ -301,47 +309,60 @@ struct test_arg_end {
        TESTCASE_START(code1 #reg1 code2)       \
        TEST_ARG_PTR(reg1, val1)                \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_F(code1 #reg1 code2, 0)     \
+       TEST_BRANCH_F(code1 #reg1 code2)        \
        TESTCASE_END
 
-#define TEST_BF_X(code, xtra_dist)             \
+#define TEST_BF(code)                          \
        TESTCASE_START(code)                    \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_F(code, xtra_dist)          \
+       TEST_BRANCH_F(code)                     \
        TESTCASE_END
 
-#define TEST_BB_X(code, xtra_dist)             \
+#define TEST_BB(code)                          \
        TESTCASE_START(code)                    \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_B(code, xtra_dist)          \
+       TEST_BRANCH_B(code)                     \
        TESTCASE_END
 
-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist)  \
-       TESTCASE_START(code1 #reg code2)                \
-       TEST_ARG_REG(reg, val)                          \
-       TEST_ARG_END("")                                \
-       TEST_BRANCH_F(code1 #reg code2, xtra_dist)      \
+#define TEST_BF_R(code1, reg, val, code2)      \
+       TESTCASE_START(code1 #reg code2)        \
+       TEST_ARG_REG(reg, val)                  \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_F(code1 #reg code2)         \
        TESTCASE_END
 
-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist)  \
-       TESTCASE_START(code1 #reg code2)                \
-       TEST_ARG_REG(reg, val)                          \
-       TEST_ARG_END("")                                \
-       TEST_BRANCH_B(code1 #reg code2, xtra_dist)      \
+#define TEST_BB_R(code1, reg, val, code2)      \
+       TESTCASE_START(code1 #reg code2)        \
+       TEST_ARG_REG(reg, val)                  \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_B(code1 #reg code2)         \
        TESTCASE_END
 
-#define TEST_BF(code)  TEST_BF_X(code, 0)
-#define TEST_BB(code)  TEST_BB_X(code, 0)
-
-#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
-
 #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3)        \
        TESTCASE_START(code1 #reg1 code2 #reg2 code3)           \
        TEST_ARG_REG(reg1, val1)                                \
        TEST_ARG_REG(reg2, val2)                                \
        TEST_ARG_END("")                                        \
-       TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0)         \
+       TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3)            \
+       TESTCASE_END
+
+#define TEST_BF_X(code, codex)                 \
+       TESTCASE_START(code)                    \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_FX(code, codex)             \
+       TESTCASE_END
+
+#define TEST_BB_X(code, codex)                 \
+       TESTCASE_START(code)                    \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_BX(code, codex)             \
+       TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex)      \
+       TESTCASE_START(code1 #reg code2)                \
+       TEST_ARG_REG(reg, val)                          \
+       TEST_ARG_END("")                                \
+       TEST_BRANCH_FX(code1 #reg code2, codex)         \
        TESTCASE_END
 
 #define TEST_X(code, codex)                    \
@@ -372,6 +393,25 @@ struct test_arg_end {
        TESTCASE_END
 
 
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler guesses better the length of inline asm
+ * code and will spill the literal pool early enough to avoid generating PC
+ * relative loads with out of range offsets.
+ */
+#define TWICE(x)       x x
+#define SPACE_0x8      TWICE(".space 4\n\t")
+#define SPACE_0x10     TWICE(SPACE_0x8)
+#define SPACE_0x20     TWICE(SPACE_0x10)
+#define SPACE_0x40     TWICE(SPACE_0x20)
+#define SPACE_0x80     TWICE(SPACE_0x40)
+#define SPACE_0x100    TWICE(SPACE_0x80)
+#define SPACE_0x200    TWICE(SPACE_0x100)
+#define SPACE_0x400    TWICE(SPACE_0x200)
+#define SPACE_0x800    TWICE(SPACE_0x400)
+#define SPACE_0x1000   TWICE(SPACE_0x800)
+
+
 /* Various values used in test cases... */
 #define N(val) (val ^ 0xffffffff)
 #define VAL1   0x12345678
index c1b4463dcc839c781004080d7426e78771bad15a..e59bbd496c39174da0a6ee4094fe6f717df97a67 100644 (file)
@@ -32,24 +32,6 @@ static atomic_t waiting_for_crash_ipi;
 
 int machine_kexec_prepare(struct kimage *image)
 {
-       unsigned long page_list;
-       void *reboot_code_buffer;
-       page_list = image->head & PAGE_MASK;
-
-       reboot_code_buffer = page_address(image->control_code_page);
-
-       /* Prepare parameters for reboot_code_buffer*/
-       kexec_start_address = image->start;
-       kexec_indirection_page = page_list;
-       kexec_mach_type = machine_arch_type;
-       kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
-       /* copy our kernel relocation code to the control code page */
-       memcpy(reboot_code_buffer,
-              relocate_new_kernel, relocate_new_kernel_size);
-
-       flush_icache_range((unsigned long) reboot_code_buffer,
-                          (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
        return 0;
 }
 
@@ -100,14 +82,31 @@ void (*kexec_reinit)(void);
 
 void machine_kexec(struct kimage *image)
 {
+       unsigned long page_list;
        unsigned long reboot_code_buffer_phys;
        void *reboot_code_buffer;
 
+
+       page_list = image->head & PAGE_MASK;
+
        /* we need both effective and real address here */
        reboot_code_buffer_phys =
            page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        reboot_code_buffer = page_address(image->control_code_page);
 
+       /* Prepare parameters for reboot_code_buffer*/
+       kexec_start_address = image->start;
+       kexec_indirection_page = page_list;
+       kexec_mach_type = machine_arch_type;
+       kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
+       /* copy our kernel relocation code to the control code page */
+       memcpy(reboot_code_buffer,
+              relocate_new_kernel, relocate_new_kernel_size);
+
+
+       flush_icache_range((unsigned long) reboot_code_buffer,
+                          (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
        printk(KERN_INFO "Bye!\n");
 
        if (kexec_reinit)
index 24e2347be6b1043ad7cf87f70ab88fe6e9a270c9..88b0941ce51ec674bbf46d604fcbb46f5dc25255 100644 (file)
@@ -343,19 +343,25 @@ validate_group(struct perf_event *event)
 {
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;
+       DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
 
-       memset(&fake_pmu, 0, sizeof(fake_pmu));
+       /*
+        * Initialise the fake PMU. We only need to populate the
+        * used_mask for the purposes of validation.
+        */
+       memset(fake_used_mask, 0, sizeof(fake_used_mask));
+       fake_pmu.used_mask = fake_used_mask;
 
        if (!validate_event(&fake_pmu, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_pmu, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
        int i, err, irq, irqs;
        struct platform_device *pmu_device = armpmu->plat_device;
 
+       if (!pmu_device)
+               return -ENODEV;
+
        err = reserve_pmu(armpmu->type);
        if (err) {
                pr_warning("unable to reserve pmu\n");
@@ -631,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+       if (!cpu_pmu)
+               return -ENODEV;
+
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index 2c3407ee857675242c874f49adaf9d26339f79bf..2334bf8a650a16a35d5849a4b6c309d5f19eb1f1 100644 (file)
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
 {
        clear_bit_unlock(type, pmu_lock);
 }
+EXPORT_SYMBOL_GPL(release_pmu);
index 75316f0dd02ae3b0be19e10a982f6a5c7e606ff3..3d0c6fb74ae4efe521cfc563ea11e0fa9738d465 100644 (file)
@@ -192,6 +192,9 @@ void cpu_idle(void)
 #endif
 
                        local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+                       wmb();
+#endif
                        if (hlt_counter) {
                                local_irq_enable();
                                cpu_relax();
index 7e7977ab994ff92ee4ded30ee728d92ed6c3a520..8fc2c8fcbdc646a4a8babecbf4a758f88e6d8d60 100644 (file)
@@ -461,8 +461,10 @@ static void __init setup_processor(void)
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);
 
-       sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
-       sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+       snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+                list->arch_name, ENDIANNESS);
+       snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
+                list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
 #ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
@@ -893,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
 {
        struct machine_desc *mdesc;
 
-       unwind_init();
-
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
@@ -902,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               extern unsigned long arm_dma_zone_size;
+               arm_dma_zone_size = mdesc->dma_zone_size;
+       }
+#endif
        if (mdesc->soft_reboot)
                reboot_setup("s");
 
@@ -932,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
 
        tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index 1040c00405d0f362916c77030249fff754eda5e8..8200deaa14f680b553bdea41652fac0aaa58e23a 100644 (file)
@@ -43,7 +43,7 @@
 
 struct cputopo_arm cpu_topology[NR_CPUS];
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_topology[cpu].core_sibling;
 }
index e7e8365795c3d3272a4ef56d34d41662acd608b8..00df012c46784ac8c510466f1a4124158c3f86a3 100644 (file)
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
-       unsigned long *insn;            /* pointer to the current instructions word */
+       const unsigned long *insn;      /* pointer to the current instructions word */
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@ enum regs {
        PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-                                      struct unwind_idx *first,
-                                      struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+                                      const struct unwind_idx *start,
+                                      const struct unwind_idx *origin,
+                                      const struct unwind_idx *stop)
 {
-       pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+       unsigned long addr_prel31;
+
+       pr_debug("%s(%08lx, %p, %p, %p)\n",
+                       __func__, addr, start, origin, stop);
+
+       /*
+        * only search in the section with the matching sign. This way the
+        * prel31 numbers can be compared as unsigned longs.
+        */
+       if (addr < (unsigned long)start)
+               /* negative offsets: [start; origin) */
+               stop = origin;
+       else
+               /* positive offsets: [origin; stop) */
+               start = origin;
+
+       /* prel31 for address relavive to start */
+       addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
-       if (addr < first->addr) {
+       while (start < stop - 1) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+               /*
+                * As addr_prel31 is relative to start an offset is needed to
+                * make it relative to mid.
+                */
+               if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+                               mid->addr_offset)
+                       stop = mid;
+               else {
+                       /* keep addr_prel31 relative to start */
+                       addr_prel31 -= ((unsigned long)mid -
+                                       (unsigned long)start);
+                       start = mid;
+               }
+       }
+
+       if (likely(start->addr_offset <= addr_prel31))
+               return start;
+       else {
                pr_warning("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
-       } else if (addr >= last->addr)
-               return last;
+       }
+}
 
-       while (first < last - 1) {
-               struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+               const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+       pr_debug("%s(%p, %p)\n", __func__, start, stop);
+       while (start < stop) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
-               if (addr < mid->addr)
-                       last = mid;
+               if (mid->addr_offset >= 0x40000000)
+                       /* negative offset */
+                       start = mid + 1;
                else
-                       first = mid;
+                       /* positive offset */
+                       stop = mid;
        }
-
-       return first;
+       pr_debug("%s -> %p\n", __func__, stop);
+       return stop;
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
-       struct unwind_idx *idx = NULL;
+       const struct unwind_idx *idx = NULL;
        unsigned long flags;
 
        pr_debug("%s(%08lx)\n", __func__, addr);
 
-       if (core_kernel_text(addr))
+       if (core_kernel_text(addr)) {
+               if (unlikely(!__origin_unwind_idx))
+                       __origin_unwind_idx =
+                               unwind_find_origin(__start_unwind_idx,
+                                               __stop_unwind_idx);
+
                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
-                                  __stop_unwind_idx - 1);
-       else {
+                                  __origin_unwind_idx,
+                                  __stop_unwind_idx);
+       } else {
                /* module unwind tables */
                struct unwind_table *table;
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
-                                                  table->stop - 1);
+                                                  table->origin,
+                                                  table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 {
        unsigned long high, low;
-       struct unwind_idx *idx;
+       const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
 
        /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_size)
 {
        unsigned long flags;
-       struct unwind_idx *idx;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
        if (!tab)
                return tab;
 
-       tab->start = (struct unwind_idx *)start;
-       tab->stop = (struct unwind_idx *)(start + size);
+       tab->start = (const struct unwind_idx *)start;
+       tab->stop = (const struct unwind_idx *)(start + size);
+       tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;
 
-       /* Convert the symbol addresses to absolute values */
-       for (idx = tab->start; idx < tab->stop; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
        kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-       struct unwind_idx *idx;
-
-       /* Convert the symbol addresses to absolute values */
-       for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
-       pr_debug("unwind: ARM stack unwinding initialised\n");
-
-       return 0;
-}
index 10d868a5a48135840278022e153f3610b0b7a640..d6408d1ee543fe5e3ceabbcda01b25efb07676ba 100644 (file)
@@ -1,5 +1,9 @@
+#include <asm/unwind.h>
+
 #if __LINUX_ARM_ARCH__ >= 6
-       .macro  bitop, instr
+       .macro  bitop, name, instr
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        cmp     r0, #0
        bne     1b
        bx      lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 
-       .macro  testop, instr, store
+       .macro  testop, name, instr, store
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        cmp     r0, #0
        movne   r0, #1
 2:     bx      lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 #else
-       .macro  bitop, instr
+       .macro  bitop, name, instr
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r2, r0, #31
@@ -49,6 +61,8 @@
        str     r2, [r1, r0, lsl #2]
        restore_irqs ip
        mov     pc, lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 
 /**
@@ -59,7 +73,9 @@
  * Note: we can trivially conditionalise the store instruction
  * to avoid dirtying the data cache.
  */
-       .macro  testop, instr, store
+       .macro  testop, name, instr, store
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r3, r0, #31
@@ -73,5 +89,7 @@
        moveq   r0, #0
        restore_irqs ip
        mov     pc, lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 #endif
index 68ed5b62e83976d906bad4409fdabda385caaf96..f4027862172f8a4f1082ae06d7568615f3e5ad45 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_change_bit)
-       bitop   eor
-ENDPROC(_change_bit)
+bitop  _change_bit, eor
index 4c04c3b51eeb0d11bc3b755044b0a63ca6f0db1b..f6b75fb64d30557c5b22655adec3ba2bd34b0c07 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_clear_bit)
-       bitop   bic
-ENDPROC(_clear_bit)
+bitop  _clear_bit, bic
index bbee5c66a23e177494875e5db4fc19da97ed0d73..618fedae4b370aac8b65c2a39ae51046b0ff711a 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                .text
 
-ENTRY(_set_bit)
-       bitop   orr
-ENDPROC(_set_bit)
+bitop  _set_bit, orr
index 15a4d431f229440979aaf179fd423eb647b8da0f..4becdc3a59cbb60717bae345ec7b3f58243abdb6 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_change_bit)
-       testop  eor, str
-ENDPROC(_test_and_change_bit)
+testop _test_and_change_bit, eor, str
index 521b66b5b95da197fa661f142547414399a0af59..918841dcce7ad57ef5e880f1c09547404070fbe5 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_clear_bit)
-       testop  bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop _test_and_clear_bit, bicne, strne
index 1c98cc2185bb0885ae0cac805193f52d34345608..8d1b2fe9e4873ba8d53ba75773a7e30e92292b7c 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_set_bit)
-       testop  orreq, streq
-ENDPROC(_test_and_set_bit)
+testop _test_and_set_bit, orreq, streq
index 66591fa53e057d59011a533a2037cfc976116790..ad930688358ca1c5683e984dc1b85799b582c5c0 100644 (file)
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index b84a9f642f5953a5ff527d3b593b1a628b61232b..0d20677fbef027591c91c2d442d528f7fa6c73f0 100644 (file)
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
        /* more usart lookup table for DT entries */
        CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
index 25e3464fb07f1fabe1714d009efd9ef8bb4783e2..629fa977497239f171d66ef47563c3da9d2b0127 100644 (file)
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ae78f4d03b738851b5e0ef191c26997c34304d9a..a178b58b0b9c8d59850ca91f0ecab9203198bbcc 100644 (file)
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ad017eb1f8df4c2ff9514cae10e0d555269ac09c..d5fbac9ff4faed0da1c112869b5c5144c4cfe2f6 100644 (file)
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index 8f4866045b41e213172c761f522ffca720a5802e..ec164a4124c9b27aa0c3b72325dc91ec13ddc673 100644 (file)
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT  (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_16BIT;
+       return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 43eadbcc29ede940fd5c4035c2b6abacbcdb2a3b..430da120a297fe08620eaba672c65e283171bd99 100644 (file)
@@ -235,7 +235,7 @@ void __init bcmring_init_timer(void)
         */
        bcmring_clocksource_init();
 
-       sp804_clockevents_register(TIMER0_VA_BASE, IRQ_TIMER0, "timer0");
+       sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMER0, "timer0");
 }
 
 struct sys_timer bcmring_timer = {
index b52b8de91bde72c2a4847c1a37a44f425974612d..f4d4d6d174d06e9c049756de6089e02e790abe8a 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/atomic.h>
+#include <linux/sched.h>
 #include <mach/dma.h>
 
 /* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
index 1d7d2499522674f94143771e84aa26f323869ac7..6659a90dbcadafffdc6ae4988f1a1c6148a955b5 100644 (file)
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index 1918ae711428b3d5b9cf2556c212bfc156f90664..46e1f4173b9735c622c8a95c5c9a605782c1eda6 100644 (file)
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                /* UBL (a few copies) plus U-Boot */
                .name           = "bootloader",
                .offset         = 0,
-               .size           = 28 * NAND_BLOCK_SIZE,
+               .size           = 30 * NAND_BLOCK_SIZE,
                .mask_flags     = MTD_WRITEABLE, /* force read-only */
        }, {
                /* U-Boot environment */
index e574d7f837a850e4ddd5efb02f102c47d4e10cd9..635bf7740157bb7ea88b94580c98ead839f44068 100644 (file)
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index 0b68ed534f8e6d3d0a16effc5cc4811a8f94f85d..af27c130595fb6897cb104253ad157f567d53f04 100644 (file)
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
        .name = "dsp",
        .parent = &pll1_sysclk1,
        .lpsc = DM646X_LPSC_C64X_CPU,
-       .flags = PSC_DSP,
        .usecount = 1,                  /* REVISIT how to disable? */
 };
 
index fa59c097223dc85ec965224e9ebfc0ac8dccf2c2..8bc3fc2561711801610461c46b803a7fbfbfde9a 100644 (file)
 #define PTCMD          0x120
 #define PTSTAT         0x128
 #define PDSTAT         0x200
-#define PDCTL1         0x304
+#define PDCTL          0x300
 #define MDSTAT         0x800
 #define MDCTL          0xA00
 
 #define PSC_STATE_ENABLE       3
 
 #define MDSTAT_STATE_MASK      0x3f
+#define PDSTAT_STATE_MASK      0x1f
 #define MDCTL_FORCE            BIT(31)
+#define PDCTL_NEXT             BIT(1)
+#define PDCTL_EPCGOOD          BIT(8)
 
 #ifndef __ASSEMBLER__
 
index 1fb6bdff38c1f5e9ff796e72c4c57be824919209..d7e210f4b55c85d7e2a2fc4831fb3f0a77f72990 100644 (file)
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                unsigned int id, bool enable, u32 flags)
 {
-       u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+       u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
        void __iomem *psc_base;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
        u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                mdctl |= MDCTL_FORCE;
        __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-       pdstat = __raw_readl(psc_base + PDSTAT);
-       if ((pdstat & 0x00000001) == 0) {
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x1;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+       pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+       if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_NEXT;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                        epcpr = __raw_readl(psc_base + EPCPR);
                } while ((((epcpr >> domain) & 1) == 0));
 
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x100;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_EPCGOOD;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
        } else {
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
index 90ec247f3b375f498e20f46c20e25fe2c943b54a..cc8d4bd6d0f71666f4fe369dab28908bbd7af741 100644 (file)
@@ -110,11 +110,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC0),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
-       }, {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
index 35f6502144ae14c9ffa989066f51add9ee084375..4ebb382c597918e1053a221b35bd52a9e9d1b39a 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/init.h>
 #include <linux/cpuidle.h>
 #include <linux/io.h>
+#include <linux/export.h>
+#include <linux/time.h>
 
 #include <asm/proc-fns.h>
 
index 97343df8f13227c371f8a906f371a7eb1d7c3c74..85b5527d0918e4bea1ca7b9abea9362dd64c357d 100644 (file)
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
        char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
        void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+       unsigned int cpu = smp_processor_id();
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI)
-               disable_irq(evt->irq);
+               if (cpu == 0)
+                       remove_irq(evt->irq, &mct_tick0_event_irq);
+               else
+                       remove_irq(evt->irq, &mct_tick1_event_irq);
        else
                disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
        clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     IRQ_MCT_LOCALTIMER, err);
        }
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
index b82dcf08e747e1052ea13b4a11feef25b7f7775b..88660d500f5be259bde8bd94389805881cf8975e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
+#include <linux/smp.h>
 
 #include <asm/cacheflush.h>
 #include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
+#ifdef CONFIG_SMP
+       cpu = cpu_logical_map(cpu);
+#endif
        writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
index 5f7f9c2a34aec39cdd4f312c326cd0005d5ea027..0e6f1af260b651b1894a9203f449ae8ecb5cac26 100644 (file)
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
 config HAVE_IMX_SRC
        bool
 
-#
-# ARCH_MX31 and ARCH_MX35 are left for compatibility
-# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
-# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
-# more sensible) names are used: SOC_IMX31 and SOC_IMX35
 config ARCH_MX1
        bool
 
@@ -27,12 +22,6 @@ config ARCH_MX25
 config MACH_MX27
        bool
 
-config ARCH_MX31
-       bool
-
-config ARCH_MX35
-       bool
-
 config SOC_IMX1
        bool
        select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
        select CPU_V6
        select IMX_HAVE_PLATFORM_MXC_RNGA
        select ARCH_MXC_AUDMUX_V2
-       select ARCH_MX31
        select MXC_AVIC
        select SMP_ON_UP if SMP
 
@@ -82,7 +70,6 @@ config SOC_IMX35
        select ARCH_MXC_IOMUX_V3
        select ARCH_MXC_AUDMUX_V2
        select HAVE_EPIT
-       select ARCH_MX35
        select MXC_AVIC
        select SMP_ON_UP if SMP
 
@@ -145,7 +132,7 @@ config MACH_MX25_3DS
        select IMX_HAVE_PLATFORM_MXC_NAND
        select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
        bool "Support Eukrea CPUIMX25 Platform"
        select SOC_IMX25
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -161,7 +148,7 @@ config MACH_EUKREA_CPUIMX25
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX25
+       depends on MACH_EUKREA_CPUIMX25SD
        default MACH_EUKREA_MBIMXSD25_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -555,7 +542,7 @@ config MACH_MX35_3DS
          Include support for MX35PDK platform. This includes specific
          configurations for the board and its peripherals.
 
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
        bool "Support Eukrea CPUIMX35 Platform"
        select SOC_IMX35
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -573,7 +560,7 @@ config MACH_EUKREA_CPUIMX35
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX35
+       depends on MACH_EUKREA_CPUIMX35SD
        default MACH_EUKREA_MBIMXSD35_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD35_BASEBOARD
index aba73214c2a8cd640e0f4371caf4ce99ac53dc0f..d97f409ce98be4a9d796dc21d3dc30fb9c7de9c7 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
 
 # i.MX25 based machines
 obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
 
 # i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
 # i.MX35 based machines
 obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
 obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
 obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
 
index 8116f119517d8065ca0653a603863275e29be71b..ac8238caecb98a98bf6c326e267fae24d88bb861 100644 (file)
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int cgr2 = 3 << 26, cgr3 = 0;
+       unsigned int cgr2 = 3 << 26;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
        cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       clk_enable(&iim_clk);
+       imx_print_silicon_rev("i.MX35", mx35_revision());
+       clk_disable(&iim_clk);
 
        /*
         * Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
         */
        if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
                /* Additionally turn on UART1, SCC, and IIM clocks */
-               cgr2 |= 3 << 16 | 3 << 4;
-               cgr3 |= 3 << 2;
+               clk_enable(&iim_clk);
+               clk_enable(&uart1_clk);
+               clk_enable(&scc_clk);
        }
 
-       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
-       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX35", mx35_revision());
-       clk_disable(&iim_clk);
-
 #ifdef CONFIG_MXC_USE_EPIT
        epit_timer_init(&epit1_clk,
                        MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
index 613a1b993bff9f7fbf9f71595b5ffa8c74922d4d..039a7abb165a35f4afc10ae08dbb7cc62f1121f7 100644 (file)
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
        imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
 };
 
+void __init imx6q_clock_map_io(void)
+{
+       iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
+}
+
 int __init mx6q_clocks_init(void)
 {
        struct device_node *np;
        void __iomem *base;
        int i, irq;
 
-       iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-
        /* retrieve the freqency of fixed clocks from device tree */
        for_each_compatible_node(np, NULL, "fixed-clock") {
                u32 rate;
index 66af2e8f7e576dffd5372862653d397d22811069..362aae780601efc41e346a8a7beade651f610d83 100644 (file)
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data
        .bitrate =              100000,
 };
 
+#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+       return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
 static struct tsc2007_platform_data tsc2007_info = {
        .model                  = 2007,
        .x_plate_ohms           = 180,
+       .get_pendown_state = tsc2007_get_pendown_state,
 };
 
-#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
 static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
        {
                I2C_BOARD_INFO("pcf8563", 0x51),
index 8bf5fa349484e2da42a4c6deea64bc042b1263e3..8deb012189b5a7e185f7299a8ad8c8503edad587 100644 (file)
@@ -34,16 +34,18 @@ static void __init imx6q_map_io(void)
 {
        imx_lluart_map_io();
        imx_scu_map_io();
+       imx6q_clock_map_io();
 }
 
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx6q gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx6q_irq_match[] __initconst = {
index 9f0e82ec3398dfcf7e83c8df246e54cdf53b9503..31807d2a8b7bf1a65d57c8f4cb748bb9e6697934 100644 (file)
 static void imx3_idle(void)
 {
        unsigned long reg = 0;
-       __asm__ __volatile__(
-               /* disable I and D cache */
-               "mrc p15, 0, %0, c1, c0, 0\n"
-               "bic %0, %0, #0x00001000\n"
-               "bic %0, %0, #0x00000004\n"
-               "mcr p15, 0, %0, c1, c0, 0\n"
-               /* invalidate I cache */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c5, 0\n"
-               /* clear and invalidate D cache */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c14, 0\n"
-               /* WFI */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c0, 4\n"
-               "nop\n" "nop\n" "nop\n" "nop\n"
-               "nop\n" "nop\n" "nop\n"
-               /* enable I and D cache */
-               "mrc p15, 0, %0, c1, c0, 0\n"
-               "orr %0, %0, #0x00001000\n"
-               "orr %0, %0, #0x00000004\n"
-               "mcr p15, 0, %0, c1, c0, 0\n"
-               : "=r" (reg));
+
+       if (!need_resched())
+               __asm__ __volatile__(
+                       /* disable I and D cache */
+                       "mrc p15, 0, %0, c1, c0, 0\n"
+                       "bic %0, %0, #0x00001000\n"
+                       "bic %0, %0, #0x00000004\n"
+                       "mcr p15, 0, %0, c1, c0, 0\n"
+                       /* invalidate I cache */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c5, 0\n"
+                       /* clear and invalidate D cache */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c14, 0\n"
+                       /* WFI */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c0, 4\n"
+                       "nop\n" "nop\n" "nop\n" "nop\n"
+                       "nop\n" "nop\n" "nop\n"
+                       /* enable I and D cache */
+                       "mrc p15, 0, %0, c1, c0, 0\n"
+                       "orr %0, %0, #0x00001000\n"
+                       "orr %0, %0, #0x00000004\n"
+                       "mcr p15, 0, %0, c1, c0, 0\n"
+                       : "=r" (reg));
+       local_irq_enable();
 }
 
 static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
        l2x0_init(l2x0_base, 0x00030024, 0x00000000);
 }
 
+#ifdef CONFIG_SOC_IMX31
 static struct map_desc mx31_io_desc[] __initdata = {
        imx_map_entry(MX31, X_MEMC, MT_DEVICE),
        imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
        iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
 }
 
-static struct map_desc mx35_io_desc[] __initdata = {
-       imx_map_entry(MX35, X_MEMC, MT_DEVICE),
-       imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
-};
-
-void __init mx35_map_io(void)
-{
-       iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
-}
-
 void __init imx31_init_early(void)
 {
        mxc_set_cpu_type(MXC_CPU_MX31);
        mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
-       imx_idle = imx3_idle;
-       imx_ioremap = imx3_ioremap;
-}
-
-void __init imx35_init_early(void)
-{
-       mxc_set_cpu_type(MXC_CPU_MX35);
-       mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
-       mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
-       imx_idle = imx3_idle;
+       pm_idle = imx3_idle;
        imx_ioremap = imx3_ioremap;
 }
 
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
        mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
 }
 
-void __init mx35_init_irq(void)
-{
-       mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
-}
-
 static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
        .per_2_per_addr = 1677,
 };
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
 
        imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
 }
+#endif /* ifdef CONFIG_SOC_IMX31 */
+
+#ifdef CONFIG_SOC_IMX35
+static struct map_desc mx35_io_desc[] __initdata = {
+       imx_map_entry(MX35, X_MEMC, MT_DEVICE),
+       imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
+};
+
+void __init mx35_map_io(void)
+{
+       iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
+       mxc_set_cpu_type(MXC_CPU_MX35);
+       mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
+       mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
+       pm_idle = imx3_idle;
+       imx_ioremap = imx3_ioremap;
+}
+
+void __init mx35_init_irq(void)
+{
+       mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
+}
 
 static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
        .ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
 
        imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
 }
+#endif /* ifdef CONFIG_SOC_IMX35 */
index 36cacbd0dcc2fa8c5d98bd5cb1e141a9d30e3503..a8e33681b73251f498a7cfee463f20c3f6d0e1ff 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/smp.h>
 #include <asm/unified.h>
 
 #define SRC_SCR                                0x000
 
 static void __iomem *src_base;
 
+#ifndef CONFIG_SMP
+#define cpu_logical_map(cpu)           0
+#endif
+
 void imx_enable_cpu(int cpu, bool enable)
 {
        u32 mask, val;
 
+       cpu = cpu_logical_map(cpu);
        mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
        val = readl_relaxed(src_base + SRC_SCR);
        val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
 
 void imx_set_cpu_jump(int cpu, void *jump_addr)
 {
+       cpu = cpu_logical_map(cpu);
        writel_relaxed(BSYM(virt_to_phys(jump_addr)),
                       src_base + SRC_GPR1 + cpu * 8);
 }
index 69156568bc41f95891df3c17ca06836a9c3bf5bb..4665767a4f79ee918ec1fd24053d296ce13d0806 100644 (file)
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
 
        /* on-chip devices */
        pxa168_add_uart(3);
-       pxa168_add_ssp(0);
+       pxa168_add_ssp(1);
        pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
 
        pxa168_add_eth(&gplugd_eth_platform_data);
index d14eeaf163226d3b2ff53c394976be090b1e4d62..99b4ce1b6562cebf64651b9a0039599d624593d6 100644 (file)
@@ -7,7 +7,7 @@
 #define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
 
 #define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (GPIO_REGS_VIRT + (x))
+#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
 
 #define NR_BUILTIN_GPIO                IRQ_GPIO_NUM
 
index 24030d0da6e3c59ec9ddfa2a76995001f747bc62..0fb7a17df3987e259297d21de210971d2f65fb80 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/module.h>
 #include <mach/irqs.h>
 #include <mach/iommu.h>
 
index 5c837603ff0fc3b6a173ab677bdfda2a75bec09b..24994bb521475b7ac728dc9a0616abbea7586ca7 100644 (file)
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
 {
        iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
        iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
        imx51_soc_init();
 
index 6bea31ab8f8581ee637a7f9a998157cd982a1b1b..64bbfcea6f350cd781bbe875e29a2d63bc8a3b61 100644 (file)
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
        gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 7678f7734db631ab5163bc1144520311e2666e18..237bdecd933180f7e8579ccb08e458064a5ca8fa 100644 (file)
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
        gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 59c0845eb4a6321badc4f6b1f58943e64c68a33e..d42132a80e8ffc8e7d4e5f34c99fe8f83da984c6 100644 (file)
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
        gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 5c5328257dca2f5560ccaa33bf98a27b80582b27..5e2e7a8438606f43015d90115046e555eee1a3c5 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <mach/hardware.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 static int mx5_cpu_rev = -1;
 
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
        if (!cpu_is_mx51())
                return 0;
 
-       if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) {
+       if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
+                       (elf_hwcap & HWCAP_NEON)) {
                elf_hwcap &= ~HWCAP_NEON;
                pr_info("Turning off NEON support, detected broken NEON implementation\n");
        }
index ccc61585659bdb17d55bb91bfc1cbb2ebf6cca1c..596edd967dbfef9a21b009b327f42cdda3ad60b0 100644 (file)
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 4; /* imx51 gets 4 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx51_irq_match[] __initconst = {
index ccaa0b81b7683f86b7750ca852f6b8b67d80eabd..85bfd5ff21b0bb925583679260321fbc7e25731b 100644 (file)
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx53 gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx53_irq_match[] __initconst = {
index 26eacc9d0d90fbf88930bd6a009c58efe6914c70..df4a508f240a04a47ce8bb5abbf6074f843c8aae 100644 (file)
@@ -23,7 +23,9 @@
 
 static void imx5_idle(void)
 {
-       mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+       if (!need_resched())
+               mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+       local_irq_enable();
 }
 
 /*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX51);
        mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
        mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
-       imx_idle = imx5_idle;
+       pm_idle = imx5_idle;
 }
 
 void __init imx53_init_early(void)
index 229ae3494216da2920bddb5164d66ce523a6ddeb..da6e4aad177c2097b12515b2ddd2572e8b397ea2 100644 (file)
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)             \
        reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
        reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
        reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg | (1 << clk->enable_shift)) {                           \
+       if (reg & (1 << clk->enable_shift)) {                           \
                pr_err("%s: clock is gated\n", __func__);               \
                return -EINVAL;                                         \
        }                                                               \
index 75d86118b76a2c524cea92c7320d25bb7d4225a5..30c7990f3c01d65c1f05ad9d425a090381661c9b 100644 (file)
 #define MX28_INT_CAN1                  9
 #define MX28_INT_LRADC_TOUCH           10
 #define MX28_INT_HSADC                 13
-#define MX28_INT_IRADC_THRESH0         14
-#define MX28_INT_IRADC_THRESH1         15
+#define MX28_INT_LRADC_THRESH0         14
+#define MX28_INT_LRADC_THRESH1         15
 #define MX28_INT_LRADC_CH0             16
 #define MX28_INT_LRADC_CH1             17
 #define MX28_INT_LRADC_CH2             18
index 0d2d2b470998a9d2cfac229654a9ee2135272a2f..bde5f6634747c639af514ecf9d699b1176891802 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 3b1681e4f49a1ae633e5ac4b3d31137f2f92b014..6b00577b70256254e29951bdf8828a2ecd613fdb 100644 (file)
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
 MACHINE_START(M28EVK, "DENX M28 EVK")
        .map_io         = mx28_map_io,
        .init_irq       = mx28_init_irq,
-       .init_machine   = m28evk_init,
        .timer          = &m28evk_timer,
+       .init_machine   = m28evk_init,
 MACHINE_END
index 177e53123a02e5b67f617e5031c047354703103c..6834dea38c04cce77e20bffc2baa3019327d9610 100644 (file)
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
 MACHINE_START(STMP378X, "STMP378X")
        .map_io         = mx23_map_io,
        .init_irq       = mx23_init_irq,
-       .init_machine   = stmp378x_dvb_init,
        .timer          = &stmp378x_dvb_timer,
+       .init_machine   = stmp378x_dvb_init,
 MACHINE_END
index 0fcff47009cf13ff53db9757e58c9dc0cbf5b927..9a7b08b2a92559caf1028df076040c5e5dc798ae 100644 (file)
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
        MX28_PAD_ENET0_CRS__ENET1_RX_EN,
 };
 
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index e0a028161ddee89a4119014ed83420bd0a21ad4e..73f287d6429b629d57f7c093645962ce48cee318 100644 (file)
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
 comment "OMAP CPU Speed"
        depends on ARCH_OMAP1
 
-config OMAP_CLOCKS_SET_BY_BOOTLOADER
-       bool "OMAP clocks set by bootloader"
-       depends on ARCH_OMAP1
-       help
-         Enable this option to prevent the kernel from overriding the clock
-         frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
-         internal LCD controller and MPU peripherals.
-
 config OMAP_ARM_216MHZ
        bool "OMAP ARM 216 MHz CPU (1710 only)"
         depends on ARCH_OMAP1 && ARCH_OMAP16XX
index 51bae31cf361289e5f2711ad46a41afd4b01b09c..b0f15d234a12b4ad9dd1beb34da7df187e3f86b4 100644 (file)
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
        omap_cfg_reg(J19_1610_CAM_D6);
        omap_cfg_reg(J18_1610_CAM_D7);
 
-       iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
-
        omap_board_config = ams_delta_config;
        omap_board_config_size = ARRAY_SIZE(ams_delta_config);
        omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
 }
 arch_initcall(ams_delta_modem_init);
 
+static void __init ams_delta_map_io(void)
+{
+       omap15xx_map_io();
+       iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
+}
+
 MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        /* Maintainer: Jonathan McDowell <noodles@earth.li> */
        .atag_offset    = 0x100,
-       .map_io         = omap15xx_map_io,
+       .map_io         = ams_delta_map_io,
        .init_early     = omap1_init_early,
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
index eaf09efb91caec613eeb73e961e7fbd77a93cf21..16b1423b454a32dea7197460a393b12a4bd4e2b0 100644 (file)
@@ -17,7 +17,8 @@
 
 #include <plat/clock.h>
 
-extern int __init omap1_clk_init(void);
+int omap1_clk_init(void);
+void omap1_clk_late_init(void);
 extern int omap1_clk_enable(struct clk *clk);
 extern void omap1_clk_disable(struct clk *clk);
 extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
index 92400b9eb69f10c419c7e1295e238d56d5ef6110..9ff90a744a2140a0bf5168403301ac9cec68b927 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>  /* for machine_is_* */
@@ -767,6 +769,15 @@ static struct clk_functions omap1_clk_functions = {
        .clk_disable_unused     = omap1_clk_disable_unused,
 };
 
+static void __init omap1_show_rates(void)
+{
+       pr_notice("Clocking rate (xtal/DPLL1/MPU): "
+                       "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+               ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+               ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+               arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+}
+
 int __init omap1_clk_init(void)
 {
        struct omap_clk *c;
@@ -835,9 +846,12 @@ int __init omap1_clk_init(void)
        /* We want to be in syncronous scalable mode */
        omap_writew(0x1000, ARM_SYSST);
 
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
-       /* Use values set by bootloader. Determine PLL rate and recalculate
-        * dependent clocks as if kernel had changed PLL or divisors.
+
+       /*
+        * Initially use the values set by bootloader. Determine PLL rate and
+        * recalculate dependent clocks as if kernel had changed PLL or
+        * divisors. See also omap1_clk_late_init() that can reprogram dpll1
+        * after the SRAM is initialized.
         */
        {
                unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +876,10 @@ int __init omap1_clk_init(void)
                        }
                }
        }
-#else
-       /* Find the highest supported frequency and enable it */
-       if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
-               printk(KERN_ERR "System frequencies not set. Check your config.\n");
-               /* Guess sane values (60MHz) */
-               omap_writew(0x2290, DPLL_CTL);
-               omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
-               ck_dpll1.rate = 60000000;
-       }
-#endif
        propagate_rate(&ck_dpll1);
        /* Cache rates for clocks connected to ck_ref (not dpll1) */
        propagate_rate(&ck_ref);
-       printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
-               "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
-              ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
-              ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
-              arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
+       omap1_show_rates();
        if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
                /* Select slicer output as OMAP input clock */
                omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +924,27 @@ int __init omap1_clk_init(void)
 
        return 0;
 }
+
+#define OMAP1_DPLL1_SANE_VALUE 60000000
+
+void __init omap1_clk_late_init(void)
+{
+       unsigned long rate = ck_dpll1.rate;
+
+       if (rate >= OMAP1_DPLL1_SANE_VALUE)
+               return;
+
+       /* System booting at unusable rate, force reprogramming of DPLL1 */
+       ck_dpll1_p->rate = 0;
+
+       /* Find the highest supported frequency and enable it */
+       if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+               pr_err("System frequencies not set, using default. Check your config.\n");
+               omap_writew(0x2290, DPLL_CTL);
+               omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
+               ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+       }
+       propagate_rate(&ck_dpll1);
+       omap1_show_rates();
+       loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
+}
index 48ef9888e820e5d95ecbd61274d2904a712ce63f..475cb2f50d872f326325991b76d0c5348aa5cfd2 100644 (file)
@@ -30,6 +30,8 @@
 #include <plat/omap7xx.h>
 #include <plat/mcbsp.h>
 
+#include "clock.h"
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
                return -ENODEV;
 
        omap_sram_init();
+       omap1_clk_late_init();
 
        /* please keep these calls, and their implementations above,
         * in alphabetical order so they're easier to sort through.
index 503414718905862d12e104b7940bfc7dc1524971..e1293aa513d338fe19ffd7990b189109b7ca0787 100644 (file)
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA
 config OMAP3_EMU
        bool "OMAP3 debugging peripherals"
        depends on ARCH_OMAP3
+       select ARM_AMBA
        select OC_ETM
        help
          Say Y here to enable debugging hardware of omap3
index 69ab1c069134ccee42da01c015aea9d219408f4a..b009f17dee5606de2acbb1b7ba25c8938d0c5adb 100644 (file)
@@ -4,7 +4,7 @@
 
 # Common support
 obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
-        common.o gpio.o dma.o wd_timer.o
+        common.o gpio.o dma.o wd_timer.o display.o
 
 omap-2-3-common                                = irq.o sdrc.o
 hwmod-common                           = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X)         := gpmc-smsc911x.o
 obj-y                                  += $(smsc911x-m) $(smsc911x-y)
 obj-$(CONFIG_ARCH_OMAP4)               += hwspinlock.o
 
-disp-$(CONFIG_OMAP2_DSS)               := display.o
-obj-y                                  += $(disp-m) $(disp-y)
-
 obj-y                                  += common-board-devices.o twl-common.o
index ba1aa07bdb29d325841b74da3117e76e60e5b668..c15c5c9c9085fa5e0a3eba9942ba57678b25db12 100644 (file)
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
        WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+               GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
        platform_device_register(&rx51_charger_device);
 }
index 1fe35c24fba278ee57614be628ce2c16babb36a4..942bb4f19f9fd6df5af92b1b551cb1a3f3bb2313 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
+#include <linux/export.h>
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
index adb2756e242f104bc8dd35e969ba4957d3407b7a..dce9905d64bb6e1af6c0b80d012142ffdfca49ee 100644 (file)
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
 #include <plat/omap-pm.h>
+#include <plat/common.h>
 
 #include "control.h"
+#include "display.h"
+
+#define DISPC_CONTROL          0x0040
+#define DISPC_CONTROL2         0x0238
+#define DISPC_IRQSTATUS                0x0018
+
+#define DSS_SYSCONFIG          0x10
+#define DSS_SYSSTATUS          0x14
+#define DSS_CONTROL            0x40
+#define DSS_SDI_CONTROL                0x44
+#define DSS_PLL_CONTROL                0x48
+
+#define LCD_EN_MASK            (0x1 << 0)
+#define DIGIT_EN_MASK          (0x1 << 1)
+
+#define FRAMEDONE_IRQ_SHIFT    0
+#define EVSYNC_EVEN_IRQ_SHIFT  2
+#define EVSYNC_ODD_IRQ_SHIFT   3
+#define FRAMEDONE2_IRQ_SHIFT   22
+#define FRAMEDONETV_IRQ_SHIFT  24
+
+/*
+ * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
+ *     reset before deciding that something has gone wrong
+ */
+#define FRAMEDONE_IRQ_TIMEOUT          100
 
 static struct platform_device omap_display_device = {
        .name          = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
 
        return r;
 }
+
+static void dispc_disable_outputs(void)
+{
+       u32 v, irq_mask = 0;
+       bool lcd_en, digit_en, lcd2_en = false;
+       int i;
+       struct omap_dss_dispc_dev_attr *da;
+       struct omap_hwmod *oh;
+
+       oh = omap_hwmod_lookup("dss_dispc");
+       if (!oh) {
+               WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
+               return;
+       }
+
+       if (!oh->dev_attr) {
+               pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
+               return;
+       }
+
+       da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
+
+       /* store value of LCDENABLE and DIGITENABLE bits */
+       v = omap_hwmod_read(oh, DISPC_CONTROL);
+       lcd_en = v & LCD_EN_MASK;
+       digit_en = v & DIGIT_EN_MASK;
+
+       /* store value of LCDENABLE for LCD2 */
+       if (da->manager_count > 2) {
+               v = omap_hwmod_read(oh, DISPC_CONTROL2);
+               lcd2_en = v & LCD_EN_MASK;
+       }
+
+       if (!(lcd_en | digit_en | lcd2_en))
+               return; /* no managers currently enabled */
+
+       /*
+        * If any manager was enabled, we need to disable it before
+        * DSS clocks are disabled or DISPC module is reset
+        */
+       if (lcd_en)
+               irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
+
+       if (digit_en) {
+               if (da->has_framedonetv_irq) {
+                       irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
+               } else {
+                       irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
+                               1 << EVSYNC_ODD_IRQ_SHIFT;
+               }
+       }
+
+       if (lcd2_en)
+               irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+
+       /*
+        * clear any previous FRAMEDONE, FRAMEDONETV,
+        * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+        */
+       omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
+
+       /* disable LCD and TV managers */
+       v = omap_hwmod_read(oh, DISPC_CONTROL);
+       v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
+       omap_hwmod_write(v, oh, DISPC_CONTROL);
+
+       /* disable LCD2 manager */
+       if (da->manager_count > 2) {
+               v = omap_hwmod_read(oh, DISPC_CONTROL2);
+               v &= ~LCD_EN_MASK;
+               omap_hwmod_write(v, oh, DISPC_CONTROL2);
+       }
+
+       i = 0;
+       while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
+              irq_mask) {
+               i++;
+               if (i > FRAMEDONE_IRQ_TIMEOUT) {
+                       pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+                       break;
+               }
+               mdelay(1);
+       }
+}
+
+#define MAX_MODULE_SOFTRESET_WAIT      10000
+int omap_dss_reset(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_opt_clk *oc;
+       int c = 0;
+       int i, r;
+
+       if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
+               pr_err("dss_core: hwmod data doesn't contain reset data\n");
+               return -EINVAL;
+       }
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk)
+                       clk_enable(oc->_clk);
+
+       dispc_disable_outputs();
+
+       /* clear SDI registers */
+       if (cpu_is_omap3430()) {
+               omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
+               omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
+       }
+
+       /*
+        * clear DSS_CONTROL register to switch DSS clock sources to
+        * PRCM clock, if any
+        */
+       omap_hwmod_write(0x0, oh, DSS_CONTROL);
+
+       omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
+                               & SYSS_RESETDONE_MASK),
+                       MAX_MODULE_SOFTRESET_WAIT, c);
+
+       if (c == MAX_MODULE_SOFTRESET_WAIT)
+               pr_warning("dss_core: waiting for reset to finish failed\n");
+       else
+               pr_debug("dss_core: softreset done\n");
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk)
+                       clk_disable(oc->_clk);
+
+       r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+       return r;
+}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644 (file)
index 0000000..b871b01
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * display.h - OMAP2+ integration-specific DSS header
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+
+#include <linux/kernel.h>
+
+struct omap_dss_dispc_dev_attr {
+       u8      manager_count;
+       bool    has_framedonetv_irq;
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644 (file)
index e69de29..0000000
index 292eee3be15fdc10096b6833fc438b2ecab9ccea..28fcb27005d2912ec13dec5be59d777e721f67a6 100644 (file)
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                pdata->reg_size = 4;
                pdata->has_ccr = true;
        }
+       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+       if (id == 1)
+               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
        if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
                if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                                        name, oh->name);
                return PTR_ERR(pdev);
        }
-       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-       if (id == 1)
-               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
        omap_mcbsp_count++;
        return 0;
 }
index 6b3088db83b7e916314615b0f19023ab22805e07..207a2ff9a8c4e1473c6c497ab8c969f64e0ac0c7 100644 (file)
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
                ohii = &oh->mpu_irqs[i++];
        } while (ohii->irq != -1);
 
-       return i;
+       return i-1;
 }
 
 /**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
                ohdi = &oh->sdma_reqs[i++];
        } while (ohdi->dma_req != -1);
 
-       return i;
+       return i-1;
 }
 
 /**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
                mem = &os->addr[i++];
        } while (mem->pa_start != mem->pa_end);
 
-       return i;
+       return i-1;
 }
 
 /**
index 6d7206213525d03ac830d8ca6092ce4317863f37..a5409ce3f3233eaac531ed070fc1f9cf74e8398f 100644 (file)
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "tv_clk", .clk = "dss_54m_fck" },
        { .role = "sys_clk", .clk = "dss2_fck" },
 };
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_slaves),
        .masters        = omap2420_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap2420_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 /* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
        .slaves         = omap2420_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
        &omap2420_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
                        .module_offs = CORE_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap2420_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_venc_hwmod,
-       .clk            = "dss_54m_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
 static struct omap_hwmod omap2420_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_fck",
+       .main_clk       = "dss_54m_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
index a2580d01c3ff98b25a57b6508bd959d0e58c700f..c4f56cb60d7d676ddda36e232c8b385a58fef6ba 100644 (file)
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "tv_clk", .clk = "dss_54m_fck" },
        { .role = "sys_clk", .clk = "dss2_fck" },
 };
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_slaves),
        .masters        = omap2430_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap2430_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 /* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
        .slaves         = omap2430_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
        &omap2430_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
                        .module_offs = CORE_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap2430_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_venc_hwmod,
-       .clk            = "dss_54m_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
 static struct omap_hwmod omap2430_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_fck",
+       .main_clk       = "dss_54m_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
index c451729d289adfd17090c3be58c53937626835fe..c11273da5dcc33f046e94babfdb6f34c1d6f2778 100644 (file)
@@ -11,6 +11,7 @@
 #include <plat/omap_hwmod.h>
 #include <plat/serial.h>
 #include <plat/dma.h>
+#include <plat/common.h>
 
 #include <mach/irqs.h>
 
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0010,
        .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                          SYSS_HAS_RESET_STATUS),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
 struct omap_hwmod_class omap2_dss_hwmod_class = {
        .name   = "dss",
        .sysc   = &omap2_dss_sysc,
+       .reset  = omap_dss_reset,
 };
 
 /*
index bc9035ec87fc59aa08410cdbc24156619c87c7a1..eef43e2e163e92224e23ea3c5b6250c14d2747fb 100644 (file)
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
-       { .role = "tv_clk", .clk = "dss_tv_fck" },
-       { .role = "video_clk", .clk = "dss_96m_fck" },
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+       { .role = "tv_clk", .clk = "dss_tv_fck" },
+       /* required only on OMAP3430 */
+       { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
 };
 
 static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap3430es1_dss_slaves),
        .masters        = omap3xxx_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap3xxx_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 static struct omap_hwmod omap3xxx_dss_core_hwmod = {
        .name           = "dss_core",
+       .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_alwon_fck", /* instead of dss_fck */
        .sdma_reqs      = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
        .slaves         = omap3xxx_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_dsi1_hwmod,
+       .clk            = "dss_ick",
        .addr           = omap3xxx_dss_dsi1_addrs,
        .fw = {
                .omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
        &omap3xxx_l4_core__dss_dsi1,
 };
 
+static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
+       { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
 static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
        .name           = "dss_dsi1",
        .class          = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_dsi1_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_dsi1_opt_clks),
        .slaves         = omap3xxx_dss_dsi1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
        &omap3xxx_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap3xxx_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_venc_hwmod,
-       .clk            = "dss_tv_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
        &omap3xxx_l4_core__dss_venc,
 };
 
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+       /* required only on OMAP3430 */
+       { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
 static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_alwon_fck",
+       .main_clk       = "dss_tv_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_venc_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_venc_opt_clks),
        .slaves         = omap3xxx_dss_venc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -3220,18 +3247,14 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
 
 /* 3430ES1-only hwmods */
 static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3430es1_dss_core_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
 /* 3430ES2+-only hwmods */
 static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3xxx_dss_core_hwmod,
        &omap3xxx_usbhsotg_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
index 7695e5d43316686429158ec56aafacbc7d25a6a7..daaf165af696f212c6dcadb4db7ac3539d54dcd6 100644 (file)
@@ -30,6 +30,7 @@
 #include <plat/mmc.h>
 #include <plat/i2c.h>
 #include <plat/dmtimer.h>
+#include <plat/common.h>
 
 #include "omap_hwmod_common_data.h"
 
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
 static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
        .name   = "dss",
        .sysc   = &omap44xx_dss_sysc,
+       .reset  = omap_dss_reset,
 };
 
 /* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
        { .role = "sys_clk", .clk = "dss_sys_clk" },
        { .role = "tv_clk", .clk = "dss_tv_clk" },
-       { .role = "dss_clk", .clk = "dss_dss_clk" },
-       { .role = "video_clk", .clk = "dss_48mhz_clk" },
+       { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
 };
 
 static struct omap_hwmod omap44xx_dss_hwmod = {
        .name           = "dss_core",
+       .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .class          = &omap44xx_dss_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .main_clk       = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
        { }
 };
 
+static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
+       .manager_count          = 3,
+       .has_framedonetv_irq    = 1
+};
+
 /* l4_per -> dss_dispc */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
        .master         = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
        &omap44xx_l4_per__dss_dispc,
 };
 
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
-       { .role = "sys_clk", .clk = "dss_sys_clk" },
-       { .role = "tv_clk", .clk = "dss_tv_clk" },
-       { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
 static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
        .class          = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
                        .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
                },
        },
-       .opt_clks       = dss_dispc_opt_clks,
-       .opt_clks_cnt   = ARRAY_SIZE(dss_dispc_opt_clks),
        .slaves         = omap44xx_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+       .dev_attr       = &omap44xx_dss_dispc_dev_attr
 };
 
 /*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
-       .main_clk       = "dss_dss_clk",
+       .main_clk       = "dss_48mhz_clk",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap44xx_venc_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
-       .main_clk       = "dss_dss_clk",
+       .main_clk       = "dss_tv_clk",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
index de832ebc93a98c8d2556675de6ae3bfd7a5b2342..51e5418899fb446cd2317efe2622e3683860eec4 100644 (file)
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
        .srst_shift     = SYSC_TYPE2_SOFTRESET_SHIFT,
 };
 
+struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+       .manager_count          = 2,
+       .has_framedonetv_irq    = 0
+};
index 39a7c37f45870446a9f61d682eb5cc333cbac94c..ad5d8f04c0b8803edeac09dff53686c156365b91 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <plat/omap_hwmod.h>
 
+#include "display.h"
+
 /* Common address space across OMAP2xxx */
 extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
 extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mcspi_class;
 
+extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
+
 #endif
index 6a66aa5e2a5b368f702f646366cb7175e6c2ad52..d15225ff5c4969b3ddde9cc79ece7caf725001eb 100644 (file)
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
 static const struct of_device_id l3_noc_match[] = {
        {.compatible = "ti,omap4-l3-noc", },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, l3_noc_match);
 #else
 #define l3_noc_match NULL
index 1e79bdf313e311fc945fb56bb2dffa3817d3b86e..00bff46ca48beb606557f8fba7a1974f571ebb91 100644 (file)
@@ -24,6 +24,7 @@
 #include "powerdomain.h"
 #include "clockdomain.h"
 #include "pm.h"
+#include "twl-common.h"
 
 static struct omap_device_pm_latency *pm_lats;
 
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
 
 static int __init omap2_common_pm_late_init(void)
 {
-       /* Init the OMAP TWL parameters */
-       omap3_twl_init();
-       omap4_twl_init();
-
        /* Init the voltage layer */
+       omap_pmic_late_init();
        omap_voltage_late_init();
 
        /* Initialize the voltages */
index 6a4f6839a7d93fecc95259506c3ea38962a3ba5a..cf246b39bac745dc315576ecc161c35666643381 100644 (file)
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
                sr_write_reg(sr_info, ERRCONFIG_V1, status);
        } else if (sr_info->ip_type == SR_TYPE_V2) {
                /* Read the status bits */
-               sr_read_reg(sr_info, IRQSTATUS);
+               status = sr_read_reg(sr_info, IRQSTATUS);
 
                /* Clear them by writing back */
                sr_write_reg(sr_info, IRQSTATUS, status);
index 5224357721686dc41d5a703a3ba02ae1e1e7bf6a..10b20c652e5dc390026bc1eec5042ff351c387f4 100644 (file)
@@ -30,6 +30,7 @@
 #include <plat/usb.h>
 
 #include "twl-common.h"
+#include "pm.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
        omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+void __init omap_pmic_late_init(void)
+{
+       /* Init the OMAP TWL parameters (if PMIC has been registerd) */
+       if (!pmic_i2c_board_info.irq)
+               return;
+
+       omap3_twl_init();
+       omap4_twl_init();
+}
+
 #if defined(CONFIG_ARCH_OMAP3)
 static struct twl4030_usb_data omap3_usb_pdata = {
        .usb_mode       = T2_USB_MODE_ULPI,
index 5e83a5bd37fb719dd06ea8deac01e0c3c2922e80..275dde8cb27aa789ce4d9bfa89c8ed92fb122711 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __OMAP_PMIC_COMMON__
 #define __OMAP_PMIC_COMMON__
 
+#include <plat/irqs.h>
+
 #define TWL_COMMON_PDATA_USB           (1 << 0)
 #define TWL_COMMON_PDATA_BCI           (1 << 1)
 #define TWL_COMMON_PDATA_MADC          (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
 
 void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
                    struct twl4030_platform_data *pmic_data);
+void omap_pmic_late_init(void);
 
 static inline void omap2_pmic_init(const char *pmic_type,
                                   struct twl4030_platform_data *pmic_data)
index cb53160f6c5d3127b961edbf102e70ab34733712..26ebb57719df5d3fbf389e46cb723b92029b8191 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
index ef555c041962983814be50c03d606b91bcd15b0d..a12b689a87026c9e06af50351b2b20d8c7e01d7d 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <linux/of.h>
index fc0b8544e17455f27fb020cee143dbaee16123b2..4b81f59a4cbaf5aebbac8c1d55980d9b4dd45618 100644 (file)
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static void balloon3_udc_command(int cmd)
 {
        if (cmd == PXA2XX_UDC_CMD_CONNECT)
index 692e1ffc558628526105f3b378f6ce745650480e..d23b92b80488257db2ef2bf4af458fc4060feaf8 100644 (file)
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
 static inline void __init colibri_pxa320_init_eth(void) {}
 #endif /* CONFIG_AX88796 */
 
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
        .gpio_vbus              = mfp_to_gpio(MFP_PIN_GPIO96),
        .gpio_pullup            = -1,
index 9c8208ca04150e4a3bbb2d41cccb28287a0efe99..ffdd70dad327dd135f1a2d5fc36a3cde45f2f86b 100644 (file)
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
 }
 #endif
 
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
 static struct gpio_vbus_mach_info gumstix_udc_info = {
        .gpio_vbus              = GPIO_GUMSTIX_USB_GPIOn,
        .gpio_pullup            = GPIO_GUMSTIX_USB_GPIOx,
index f80bbe246afe5812b17db200d709b13f6160f72c..d4eac3d6ffb5ecd04df4abeb0ed45b32a2882705 100644 (file)
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
 #define palm27x_lcd_init(power, mode)  do {} while (0)
 #endif
 
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 extern void __init palm27x_udc_init(int vbus, int pullup,
                                        int vbus_inverted);
 #else
index 325c245c0a0dd3916129b2875caaec931758137e..fbc10d7b95d1e8ef7ceec5dd03d512c7c2d314bf 100644 (file)
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info palm27x_udc_info = {
        .gpio_vbus_inverted     = 1,
 };
index 6ec7caefb37c8219d02f45fc0088249295006fa5..2c24c67fd92b6d863fc2e534b6ad9f15b7daaaf7 100644 (file)
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
 /******************************************************************************
  * UDC
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
 static struct gpio_vbus_mach_info palmtc_udc_info = {
        .gpio_vbus              = GPIO_NR_PALMTC_USB_DETECT_N,
        .gpio_vbus_inverted     = 1,
index a7539a6ed1ff2a5d1ce4a460183a17fe28900e45..ca0c6615028c42aa0414cc2e76de4a0a29c83918 100644 (file)
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
        .gpio_vbus              = GPIO41_VPAC270_UDC_DETECT,
        .gpio_pullup            = -1,
index 5e6b42089eb44d7048b39cc2855403cd44cc0ddc..3341fd118723ff11cc7a21df95241c1252c2f6a0 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/gpio.h>
index 66668565ee75e6694c086030504bf5d01fc59bef..f208154b1382d0492898ebba6b6d7c14079ad80d 100644 (file)
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 
index 7a3bc32df425a3426016ec5448e884ad0e0443ea..51c00f2453c6c62ef9537143c0f31ad8ea416424 100644 (file)
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
        s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
        .name   = "s3c6400-core",
 };
 
index 83d2afb79e9f88370fbced458f6ffbf9a5b1b1c8..2cf80026c58d470c5f328da8b6df696fa4514bae 100644 (file)
@@ -20,7 +20,7 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
 {
        s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
        s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
index a9106c392398c1c61dc16f01914508b7b5a8956c..8662ef6e5681a40df5eaaf7eb4dce07127e1b594 100644 (file)
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
        .pwm_id = 3,
+       .pwm_period_ns = 1000,
 };
 
 static void __init smdkv210_map_io(void)
index 5a616f6e56120c850c9afa93af19513a4b80ef1c..f7951aa0456287eadb4356ef5d1a3ecd3266abba 100644 (file)
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
-   zreladdr-$(CONFIG_SA1111)           += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+   zreladdr-y  += 0xc0208000
 else
    zreladdr-y  += 0xc0008000
 endif
index b862e9f81e3e557935f13df5ec6fa29128b4d86c..7119b87cbfa0caa2727334b7815a8cf388ff6077 100644 (file)
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
 
 MACHINE_START(AG5EVM, "ag5evm")
        .map_io         = ag5evm_map_io,
+       .nr_irqs        = NR_IRQS_LEGACY,
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = ag5evm_init,
index bd9a78424d6b8e25a56b548ed3f38954981b5678..f44150b5ae46fc3a9f9c7ef57d04a30b2031dde3 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        [1] = {
-               .start          = gic_spi(33), /* PINTA2 @ PORT144 */
+               .start          = SH73A0_PINT0_IRQ(2), /* PINTA2 */
                .flags          = IORESOURCE_IRQ,
        },
 };
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
 #define GPIO_LED(n, g) { .name = n, .gpio = g }
 
 static struct gpio_led gpio_leds[] = {
-       GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
-       GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
-       GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
-       GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
        GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
        GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
        GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
        },
 };
 
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+       .name           = "V2513",
+       .pin_gpio_fn    = GPIO_FN_TPU1TO2,
+       .pin_gpio       = GPIO_PORT153,
+       .channel_offset = 0x90,
+       .timer_bit = 2,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+       [0] = {
+               .name   = "TPU12",
+               .start  = 0xe6610090,
+               .end    = 0xe66100b5,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu12_device = {
+       .name = "leds-renesas-tpu",
+       .id = 12,
+       .dev = {
+               .platform_data  = &led_renesas_tpu12_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu12_resources),
+       .resource       = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+       .name           = "V2514",
+       .pin_gpio_fn    = GPIO_FN_TPU4TO1,
+       .pin_gpio       = GPIO_PORT199,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+       [0] = {
+               .name   = "TPU41",
+               .start  = 0xe6640050,
+               .end    = 0xe6640075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu41_device = {
+       .name = "leds-renesas-tpu",
+       .id = 41,
+       .dev = {
+               .platform_data  = &led_renesas_tpu41_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu41_resources),
+       .resource       = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+       .name           = "V2515",
+       .pin_gpio_fn    = GPIO_FN_TPU2TO1,
+       .pin_gpio       = GPIO_PORT197,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+       [0] = {
+               .name   = "TPU21",
+               .start  = 0xe6620050,
+               .end    = 0xe6620075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu21_device = {
+       .name = "leds-renesas-tpu",
+       .id = 21,
+       .dev = {
+               .platform_data  = &led_renesas_tpu21_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu21_resources),
+       .resource       = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+       .name           = "KEYLED",
+       .pin_gpio_fn    = GPIO_FN_TPU3TO0,
+       .pin_gpio       = GPIO_PORT163,
+       .channel_offset = 0x10,
+       .timer_bit = 0,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+       [0] = {
+               .name   = "TPU30",
+               .start  = 0xe6630010,
+               .end    = 0xe6630035,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu30_device = {
+       .name = "leds-renesas-tpu",
+       .id = 30,
+       .dev = {
+               .platform_data  = &led_renesas_tpu30_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu30_resources),
+       .resource       = tpu30_resources,
+};
+
 /* MMCIF */
 static struct resource mmcif_resources[] = {
        [0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
        &keysc_device,
        &gpio_keys_device,
        &gpio_leds_device,
+       &leds_tpu12_device,
+       &leds_tpu41_device,
+       &leds_tpu21_device,
+       &leds_tpu30_device,
        &mmcif_device,
        &sdhi0_device,
        &sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
        shmobile_setup_console();
 }
 
-#define PINTER0A       0xe69000a0
-#define PINTCR0A       0xe69000b0
-
-void __init kota2_init_irq(void)
-{
-       sh73a0_init_irq();
-
-       /* setup PINT: enable PINTA2 as active low */
-       __raw_writel(1 << 29, PINTER0A);
-       __raw_writew(2 << 10, PINTCR0A);
-}
-
 static void __init kota2_init(void)
 {
        sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
 
 MACHINE_START(KOTA2, "kota2")
        .map_io         = kota2_map_io,
-       .init_irq       = kota2_init_irq,
+       .nr_irqs        = NR_IRQS_LEGACY,
+       .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = kota2_init,
        .timer          = &kota2_timer,
index 61a846bb30f2034ec3ae69253aea2d2d6d695aa8..1370a89ca358ba548c80ae5ed3ec29b52156b501 100644 (file)
@@ -113,6 +113,12 @@ static struct clk main_clk = {
        .ops            = &main_clk_ops,
 };
 
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+       .ops            = &div2_clk_ops,
+       .parent         = &main_clk,
+};
+
 /* PLL0, PLL1, PLL2, PLL3 */
 static unsigned long pll_recalc(struct clk *clk)
 {
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
        &extal1_div2_clk,
        &extal2_div2_clk,
        &main_clk,
+       &main_div2_clk,
        &pll0_clk,
        &pll1_clk,
        &pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
        [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
        [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
        [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
-       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
        [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
        [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
        [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
        MSTP314, MSTP313, MSTP312, MSTP311,
+       MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
        MSTP_NR };
 
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
        [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+       [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+       [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+       [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+       [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
        [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
        [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
        [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
        CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
        CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
        CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
index 0a5b22942fd3555d38591dd7e24c0a556079b9e9..34bbcbfb1706f1b74d4d5c92965e491d32204e21 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/bitrev.h>
+#include <linux/console.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
@@ -106,9 +107,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
        return 0;
 }
 
-static int pd_power_up(struct generic_pm_domain *genpd)
+static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
 {
-       struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
        unsigned int mask = 1 << sh7372_pd->bit_shift;
        unsigned int retry_count;
        int ret = 0;
@@ -123,13 +123,13 @@ static int pd_power_up(struct generic_pm_domain *genpd)
 
        for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
                if (!(__raw_readl(SWUCR) & mask))
-                       goto out;
+                       break;
                if (retry_count > PSTR_RETRIES)
                        udelay(PSTR_DELAY_US);
                else
                        cpu_relax();
        }
-       if (__raw_readl(SWUCR) & mask)
+       if (!retry_count)
                ret = -EIO;
 
        if (!sh7372_pd->no_debug)
@@ -137,12 +137,17 @@ static int pd_power_up(struct generic_pm_domain *genpd)
                         mask, __raw_readl(PSTR));
 
  out:
-       if (ret == 0 && sh7372_pd->resume)
+       if (ret == 0 && sh7372_pd->resume && do_resume)
                sh7372_pd->resume();
 
        return ret;
 }
 
+static int pd_power_up(struct generic_pm_domain *genpd)
+{
+        return __pd_power_up(to_sh7372_pd(genpd), true);
+}
+
 static void sh7372_a4r_suspend(void)
 {
        sh7372_intcs_suspend();
@@ -174,7 +179,7 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
        genpd->active_wakeup = pd_active_wakeup;
        genpd->power_off = pd_power_down;
        genpd->power_on = pd_power_up;
-       genpd->power_on(&sh7372_pd->genpd);
+       __pd_power_up(sh7372_pd, false);
 }
 
 void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
@@ -227,11 +232,23 @@ struct sh7372_pm_domain sh7372_a3sp = {
        .no_debug = true,
 };
 
+static void sh7372_a3sp_init(void)
+{
+       /* serial consoles make use of SCIF hardware located in A3SP,
+        * keep such power domain on if "no_console_suspend" is set.
+        */
+       sh7372_a3sp.stay_on = !console_suspend_enabled;
+}
+
 struct sh7372_pm_domain sh7372_a3sg = {
        .bit_shift = 13,
 };
 
-#endif /* CONFIG_PM */
+#else /* !CONFIG_PM */
+
+static inline void sh7372_a3sp_init(void) {}
+
+#endif /* !CONFIG_PM */
 
 #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
 static int sh7372_do_idle_core_standby(unsigned long unused)
@@ -465,6 +482,8 @@ void __init sh7372_pm_init(void)
        /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
        __raw_writel(0, PDNSEL);
 
+       sh7372_a3sp_init();
+
        sh7372_suspend_init();
        sh7372_cpuidle_init();
 }
index 7a1fa6adb7c32d5645b1f0e608b1e61794081afc..5b0c38abacc107a7eae8bd35ec8f03666f3422e7 100644 (file)
@@ -422,7 +422,7 @@ struct platform_device nuc900_device_kpi = {
 
 /* LCD controller*/
 
-static struct nuc900fb_display __initdata nuc900_lcd_info[] = {
+static struct nuc900fb_display nuc900_lcd_info[] = {
        /* Giantplus Technology GPM1040A0 320x240 Color TFT LCD */
        [0] = {
                .type           = LCM_DCCS_VA_SRC_RGB565,
@@ -445,7 +445,7 @@ static struct nuc900fb_display __initdata nuc900_lcd_info[] = {
        },
 };
 
-static struct nuc900fb_mach_info nuc900_fb_info __initdata = {
+static struct nuc900fb_mach_info nuc900_fb_info = {
 #if defined(CONFIG_GPM1040A0_320X240)
        .displays               = &nuc900_lcd_info[0],
 #else
index 94c0e71617c6c3487aaf20a4f1b7bfd34023e580..23ef1f573abd3395ac872ab6b39be6bc29090a8d 100644 (file)
@@ -19,6 +19,7 @@
 extern void mfp_set_groupf(struct device *dev);
 extern void mfp_set_groupc(struct device *dev);
 extern void mfp_set_groupi(struct device *dev);
-extern void mfp_set_groupg(struct device *dev);
+extern void mfp_set_groupg(struct device *dev, const char *subname);
+extern void mfp_set_groupd(struct device *dev, const char *subname);
 
 #endif /* __ASM_ARCH_MFP_H */
index bd94819e314fed7f67173c016a00bbcfbacc38c6..2c4e0c1285010fdfee53bebeaff4cd638c50a56f 100644 (file)
@@ -14,7 +14,7 @@
 #ifndef __ASM_ARCH_SPI_H
 #define __ASM_ARCH_SPI_H
 
-extern void mfp_set_groupg(struct device *dev);
+extern void mfp_set_groupg(struct device *dev, const char *subname);
 
 struct nuc900_spi_info {
        unsigned int num_cs;
index fb7fb627b1a564ca920fdb747080988f3fb75551..9dd74612bb8707560abd06e6adc250f7ecd54bf2 100644 (file)
 #define REG_MFSEL      (W90X900_VA_GCR + 0xC)
 
 #define GPSELF         (0x01 << 1)
-
 #define GPSELC         (0x03 << 2)
-#define ENKPI          (0x02 << 2)
-#define ENNAND         (0x01 << 2)
+#define GPSELD         (0x0f << 4)
 
 #define GPSELEI0       (0x01 << 26)
 #define GPSELEI1       (0x01 << 27)
 #define GPIOG0TO1      (0x03 << 14)
 #define GPIOG2TO3      (0x03 << 16)
 #define GPIOG22TO23    (0x03 << 22)
+#define GPIOG18TO20    (0x07 << 18)
 
 #define ENSPI          (0x0a << 14)
 #define ENI2C0         (0x01 << 14)
 #define ENI2C1         (0x01 << 16)
 #define ENAC97         (0x02 << 22)
+#define ENSD1          (0x02 << 18)
+#define ENSD0          (0x0a << 4)
+#define ENKPI          (0x02 << 2)
+#define ENNAND         (0x01 << 2)
 
 static DEFINE_MUTEX(mfp_mutex);
 
@@ -127,16 +130,19 @@ void mfp_set_groupi(struct device *dev)
 }
 EXPORT_SYMBOL(mfp_set_groupi);
 
-void mfp_set_groupg(struct device *dev)
+void mfp_set_groupg(struct device *dev, const char *subname)
 {
        unsigned long mfpen;
        const char *dev_id;
 
-       BUG_ON(!dev);
+       BUG_ON((!dev) && (!subname));
 
        mutex_lock(&mfp_mutex);
 
-       dev_id = dev_name(dev);
+       if (subname != NULL)
+               dev_id = subname;
+       else
+               dev_id = dev_name(dev);
 
        mfpen = __raw_readl(REG_MFSEL);
 
@@ -152,6 +158,9 @@ void mfp_set_groupg(struct device *dev)
        } else if (strcmp(dev_id, "nuc900-audio") == 0) {
                mfpen &= ~(GPIOG22TO23);
                mfpen |= ENAC97;/*enable AC97*/
+       } else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
+               mfpen &= ~(GPIOG18TO20);
+               mfpen |= (ENSD1 | 0x01);/*enable sd1*/
        } else {
                mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
        }
@@ -162,3 +171,30 @@ void mfp_set_groupg(struct device *dev)
 }
 EXPORT_SYMBOL(mfp_set_groupg);
 
+void mfp_set_groupd(struct device *dev, const char *subname)
+{
+       unsigned long mfpen;
+       const char *dev_id;
+
+       BUG_ON((!dev) && (!subname));
+
+       mutex_lock(&mfp_mutex);
+
+       if (subname != NULL)
+               dev_id = subname;
+       else
+               dev_id = dev_name(dev);
+
+       mfpen = __raw_readl(REG_MFSEL);
+
+       if (strcmp(dev_id, "nuc900-mmc-port0") == 0) {
+               mfpen &= ~GPSELD;/*enable sd0*/
+               mfpen |= ENSD0;
+       } else
+               mfpen &= (~GPSELD);
+
+       __raw_writel(mfpen, REG_MFSEL);
+
+       mutex_unlock(&mfp_mutex);
+}
+EXPORT_SYMBOL(mfp_set_groupd);
index 8ac9e9f84790bb946716016607478ad97711376d..b1e192ba8c2450cb75a8118f86f8b650be284c82 100644 (file)
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
 {
        void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
        /* write to an unmmapped register */
        writel_relaxed(0, base + L2X0_DUMMY_REG);
 #else
index e4e7f6cba1ab4823fdbf0fe7ab2109bf6df89ed7..1aa664a1999fce45c2548726b50b6fa924608ec2 100644 (file)
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
        pte_t *pte;
        int i = 0;
        unsigned long base = consistent_base;
-       unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+       unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        struct page *page;
        void *addr;
 
+       /*
+        * Following is a work-around (a.k.a. hack) to prevent pages
+        * with __GFP_COMP being passed to split_page() which cannot
+        * handle them.  The real problem is that this flag probably
+        * should be 0 on ARM as it is not supported on this
+        * platform; see CONFIG_HUGETLBFS.
+        */
+       gfp &= ~(__GFP_COMP);
+
        *handle = ~0;
        size = PAGE_ALIGN(size);
 
index 74be05f3e03ac58be921aff208c6f6aa460ab683..44b628e4d6ea9c0121acf892ffbcfb30d0fc40ad 100644 (file)
@@ -9,8 +9,7 @@
 #include <linux/io.h>
 #include <linux/personality.h>
 #include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>
 
 #define COLOUR_ALIGN(addr,pgoff)               \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-       unsigned int cache_type;
-       int do_align = 0, aliasing = 0;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
 
        /*
         * We only need to do colour alignment if either the I or D
-        * caches alias.  This is indicated by bits 9 and 21 of the
-        * cache type register.
+        * caches alias.
         */
-       cache_type = read_cpuid_cachetype();
-       if (cache_type != read_cpuid_id()) {
-               aliasing = (cache_type | cache_type >> 12) & (1 << 11);
-               if (aliasing)
-                       do_align = filp || flags & MAP_SHARED;
-       }
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
 
        /*
         * We enforce the MAP_FIXED case.
index 2c559ac381425d325757c68d83e73c37c77e6b5d..e70a73731eaacb823b15f625ddb614c99b44c5f2 100644 (file)
@@ -363,11 +363,13 @@ __v7_setup:
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
-#ifdef CONFIG_ARM_ERRATA_751472
-       cmp     r6, #0x30                       @ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+       ALT_SMP(cmp r6, #0x30)                  @ present prior to r3p0
+       ALT_UP_B(1f)
        mrclt   p15, 0, r10, c15, c0, 1         @ read diagnostic register
        orrlt   r10, r10, #1 << 11              @ set bit #11
        mcrlt   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+1:
 #endif
 
 3:     mov     r10, #0
index c074e66ad224e83d18d1f278afc78aa00e18b494..4e0a371630b38fb3a950b9063f30a53d3ed0b5a7 100644 (file)
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
 }
index 74aac96cda2007f9edcd0aea03c9fd40520952a1..73db34bf588ae7985fc50c71e9e2a4aa3af7a5f8 100644 (file)
@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -97,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret;
        int i;
index 83b745a5e1b724e92ee87be633dc15b00edab968..c75f254abd857c07e6f8133716b51d5925f17581 100644 (file)
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
 };
 
 extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
-extern void (*imx_idle)(void);
 extern void imx_print_silicon_rev(const char *cpu, int srev);
 
 void avic_handle_irq(struct pt_regs *);
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void);
 extern void imx53_smd_common_init(void);
 extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
 extern void imx6q_pm_init(void);
+extern void imx6q_clock_map_io(void);
 #endif
index 00a78193c681ae16f5ed61f1b587f3ecf9e777f8..a4d36d601d55a5ed493f9191929850c6530f804f 100644 (file)
 #define IMX_CHIP_REVISION_3_3          0x33
 #define IMX_CHIP_REVISION_UNKNOWN      0xff
 
-#define IMX_CHIP_REVISION_1_0_STRING           "1.0"
-#define IMX_CHIP_REVISION_1_1_STRING           "1.1"
-#define IMX_CHIP_REVISION_1_2_STRING           "1.2"
-#define IMX_CHIP_REVISION_1_3_STRING           "1.3"
-#define IMX_CHIP_REVISION_2_0_STRING           "2.0"
-#define IMX_CHIP_REVISION_2_1_STRING           "2.1"
-#define IMX_CHIP_REVISION_2_2_STRING           "2.2"
-#define IMX_CHIP_REVISION_2_3_STRING           "2.3"
-#define IMX_CHIP_REVISION_3_0_STRING           "3.0"
-#define IMX_CHIP_REVISION_3_1_STRING           "3.1"
-#define IMX_CHIP_REVISION_3_2_STRING           "3.2"
-#define IMX_CHIP_REVISION_3_3_STRING           "3.3"
-#define IMX_CHIP_REVISION_UNKNOWN_STRING       "unknown"
-
 #ifndef __ASSEMBLY__
 extern unsigned int __mxc_cpu_type;
 #endif
index cf88b3593fba794d1a2de0c871f278ec03988da5..b9895d250167cf6b384d5cb1320e00e855537019 100644 (file)
 #ifndef __ASM_ARCH_MXC_SYSTEM_H__
 #define __ASM_ARCH_MXC_SYSTEM_H__
 
-extern void (*imx_idle)(void);
-
 static inline void arch_idle(void)
 {
-       if (imx_idle != NULL)
-               (imx_idle)();
-       else
-               cpu_do_idle();
+       cpu_do_idle();
 }
 
 void arch_reset(char mode, const char *cmd);
index 88fd40452567a30aeb6f2b55ba1e3f4684959a78..477971b009308a929c53c19ce40c4b35c425d623 100644 (file)
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
        case MACH_TYPE_PCM043:
        case MACH_TYPE_LILLY1131:
        case MACH_TYPE_VPR200:
+       case MACH_TYPE_EUKREA_CPUIMX35SD:
                uart_base = MX3X_UART1_BASE_ADDR;
                break;
        case MACH_TYPE_MAGX_ZN5:
index 42d74ea590848fe2fc31fbdf16dfb19d77cf78ab..e032717f7d02c211ee8cad0d0200cec5f83974cf 100644 (file)
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN                        (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                do_div(c, period_ns);
                duty_cycles = c;
 
+               /*
+                * according to imx pwm RM, the real period value should be
+                * PERIOD value in PWMPR plus 2.
+                */
+               if (period_cycles > 2)
+                       period_cycles -= 2;
+               else
+                       period_cycles = 0;
+
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-               cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+               cr = MX3_PWMCR_PRESCALER(prescale) |
+                       MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                       MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                if (cpu_is_mx25())
                        cr |= MX3_PWMCR_CLKSRC_IPG;
index 9dad8dcc2ea9dde822720e14c0c55ead52e8a420..d65fb31a55ca47ef350e38da864b6d34cf5295f9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 
 #include <mach/hardware.h>
 #include <mach/common.h>
@@ -28,8 +29,8 @@
 #include <asm/system.h>
 #include <asm/mach-types.h>
 
-void (*imx_idle)(void) = NULL;
 void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
+EXPORT_SYMBOL_GPL(imx_ioremap);
 
 static void __iomem *wdog_base;
 
index 197ca03c3f7d8490a109a9b576b2e2f9c9c420e6..eb73ab40e9556ee03f36e63fa7c06b9e77b4b1d3 100644 (file)
@@ -165,8 +165,8 @@ struct dpll_data {
        u8                      auto_recal_bit;
        u8                      recal_en_bit;
        u8                      recal_st_bit;
-       u8                      flags;
 #  endif
+       u8                      flags;
 };
 
 #endif
index c50df4814f6f43935c7b021064c610f200ff0dc0..3ff3e36580f267df6890712efef55e8ee22bdbc7 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 
 #include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
 
 struct sys_timer;
 
@@ -55,6 +56,8 @@ void am35xx_init_early(void);
 void ti816x_init_early(void);
 void omap4430_init_early(void);
 
+extern int omap_dss_reset(struct omap_hwmod *);
+
 void omap_sram_init(void);
 
 /*
index 41ab97ebe4cfc8877fc58cd09f446c02636a030d..10d160888133c72101101f8e1dfafb53ddaf1af9 100644 (file)
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
        struct orion_gpio_chip *ochip;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
+       char gc_label[16];
 
        if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
                return;
 
+       snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+               orion_gpio_chip_count);
+
        ochip = orion_gpio_chips + orion_gpio_chip_count;
-       ochip->chip.label = "orion_gpio";
+       ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
        ochip->chip.request = orion_gpio_request;
        ochip->chip.direction_input = orion_gpio_direction_input;
        ochip->chip.get = orion_gpio_get;
index a9276667c2fb0e59c21e643aab28aef2debdee84..c7adad0e8de091ad42d021958bb1fc3b3c8cff03 100644 (file)
@@ -12,7 +12,7 @@
 */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
index e1cbc728c7759b1e1f54923f11120b3ea4647cb2..c8bec9c7655d417ebbcb78096f16a7cbeddcf54d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/export.h>
 
 #include <asm/pgtable.h>
 
index e657305644cc27140f8c878d99e34ac2ea668ac0..a976c023b286b4a1b991cfa63c2dc93e9912151d 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
index dac4760c0f0aeb58b4c68b5b01913f41dc1430b4..95509d8eb140fda1367658d048660790253b35d5 100644 (file)
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
 extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
 extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
 
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
 #define s3c_cpufreq_debugfs_call(x) x
 #else
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
 extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
 
 #ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
+
 extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
 extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 #else
+#define s3c2410_iotiming_debugfs NULL
 #define s3c2410_iotiming_calc NULL
 #define s3c2410_iotiming_get NULL
 #define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
 
 /* S3C2412 compatible routines */
 
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
-                               struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
 
 extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
                                struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
 
 extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
 
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
 #define s3c_freq_dbg(x...) printk(KERN_INFO x)
index d48245bb02b3bbf6178a45e7e6ae48ad3ec8d7f6..df8155b9d4d19e550b881abf22df4dbbeab3315c 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef __PLAT_GPIO_CFG_H
 #define __PLAT_GPIO_CFG_H __FILE__
 
+#include<linux/types.h>
+
 typedef unsigned int __bitwise__ samsung_gpio_pull_t;
 typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
 
index efe1d564473e02c6195774fbc47c51a23a97fad4..312b510d86b76bf0b96feba92de8aae8163380c3 100644 (file)
@@ -11,7 +11,7 @@
 */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
index dc1185dcf80d559757d289f4490723888786c2fa..c559d8438c70ee2d873afc43a7d14ee1fc2002c2 100644 (file)
@@ -11,7 +11,7 @@
  * the Free Software Foundation; either version 2 of the License.
 */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 5bdeef9698470b354dc3f144caadabda08dae00d..ccbe16f47227e2ebb171032b2a5de66289014cd0 100644 (file)
@@ -1123,5 +1123,6 @@ blissc                    MACH_BLISSC             BLISSC                  3491
 thales_adc             MACH_THALES_ADC         THALES_ADC              3492
 ubisys_p9d_evp         MACH_UBISYS_P9D_EVP     UBISYS_P9D_EVP          3493
 atdgp318               MACH_ATDGP318           ATDGP318                3494
+m28evk                 MACH_M28EVK             M28EVK                  3613
 smdk4212               MACH_SMDK4212           SMDK4212                3638
 smdk4412               MACH_SMDK4412           SMDK4412                3765
index 32d90867a9841098177b1fd5969af784ef88ac19..5f2cdb3e428cd979601b6b15d5e04a24042c7a8e 100644 (file)
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V10
 config ETRAX_ETHERNET
        bool "Ethernet support"
        depends on ETRAX_ARCH_V10
-       select NET_ETHERNET
+       select ETHERNET
        select NET_CORE
        select MII
        help
index e47e9c3401b08bbbde729c13e626b9a91823ec18..de43aadcdbc47963cdaa63e0e522cdc171e3d432 100644 (file)
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V32
 config ETRAX_ETHERNET
        bool "Ethernet support"
        depends on ETRAX_ARCH_V32
-       select NET_ETHERNET
+       select ETHERNET
        select NET_CORE
        select MII
        help
index 6073b187528a26a8ac16826aa98cd7986c5724ed..5a274af31b2b82b8f1e561fe66d7c2f0ce58d96b 100644 (file)
@@ -60,6 +60,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         ((__ct) / NSEC_PER_USEC)
 #define usecs_to_cputime(__usecs)      ((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)    usecs_to_cputime(__usecs)
 
 /*
  * Convert cputime <-> seconds
index 43f984e93970b8acbabbfd6554920a4b4fb265f4..303192fc9260d50f44cdf6e7ef0371a2fc41d9da 100644 (file)
 #define __NR_clock_adjtime     342
 #define __NR_syncfs            343
 #define __NR_setns             344
+#define __NR_process_vm_readv  345
+#define __NR_process_vm_writev 346
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            345
+#define NR_syscalls            347
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index c468f2edaa85ec0cd2356e392a0f0e7efddb8218..ce827b376110a6a815b4ccece0a066e5ad66cc59 100644 (file)
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns
+       .long sys_process_vm_readv      /* 345 */
+       .long sys_process_vm_writev
 
diff --git a/arch/microblaze/include/asm/namei.h b/arch/microblaze/include/asm/namei.h
deleted file mode 100644 (file)
index 61d60b8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_NAMEI_H
-#define _ASM_MICROBLAZE_NAMEI_H
-
-#ifdef __KERNEL__
-
-/* This dummy routine maybe changed to something useful
- * for /usr/gnemul/ emulation stuff.
- * Look at asm-sparc/namei.h for details.
- */
-#define __emul_prefix() NULL
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_MICROBLAZE_NAMEI_H */
index 975c20327bb152e4774f24f05a273bc4bbeb3a28..0a430e06f5e5e11f3989265a1f49336d1b10abee 100644 (file)
@@ -17,8 +17,6 @@
 
 static struct map_info flash_map;
 static struct mtd_info *mymtd;
-static int nr_parts;
-static struct mtd_partition *parts;
 static const char *part_probe_types[] = {
        "cmdlinepart",
 #ifdef CONFIG_MTD_REDBOOT_PARTS
@@ -61,11 +59,8 @@ static int __init flash_init(void)
                mymtd = do_map_probe("cfi_probe", &flash_map);
                if (mymtd) {
                        mymtd->owner = THIS_MODULE;
-
-                       nr_parts = parse_mtd_partitions(mymtd,
-                                                       part_probe_types,
-                                                       &parts, 0);
-                       mtd_device_register(mymtd, parts, nr_parts);
+                       mtd_device_parse_register(mymtd, part_probe_types,
+                                                 0, NULL, 0);
                } else {
                        pr_err("Failed to register MTD device for flash\n");
                }
index 8b606423bbd7f84dcacd4b043297844fc59200d4..efcfff4d4627c55f4238bfd3361fe937be25465e 100644 (file)
@@ -207,8 +207,9 @@ void octeon_prepare_cpus(unsigned int max_cpus)
         * the other bits alone.
         */
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
-       if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
-                       "SMP-IPI", mailbox_interrupt)) {
+       if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
+                       IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
+                       mailbox_interrupt)) {
                panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
        }
 }
index 708f08761406fe3bfdbd308822f163a2ae7c9822..cae42259d6da9cc49efd4ceb8bccdbad941fe3d0 100644 (file)
@@ -50,7 +50,7 @@ void __init prom_init(void)
 
        /* arg[0] is "g", the rest is boot parameters */
        for (i = 1; i < argc; i++) {
-               if (strlen(arcs_cmdline) + strlen(arg[i] + 1)
+               if (strlen(arcs_cmdline) + strlen(arg[i]) + 1
                    >= sizeof(arcs_cmdline))
                        break;
                strcat(arcs_cmdline, arg[i]);
index d08d7c6721394ea269eb0f41c29da264b3638a70..c523123df380f41be9c131753f649e83fec5226d 100644 (file)
@@ -95,7 +95,7 @@ struct mace_video {
  * Ethernet interface
  */
 struct mace_ethernet {
-       volatile unsigned long mac_ctrl;
+       volatile u64 mac_ctrl;
        volatile unsigned long int_stat;
        volatile unsigned long dma_ctrl;
        volatile unsigned long timer;
index 76961cabeedfe1cdac4caaef7e9989ca04a345be..2ef17e8df40346f07aea6e01431186686214a1f7 100644 (file)
@@ -36,6 +36,8 @@ static inline int gpio_get_value(unsigned gpio)
        return -EINVAL;
 }
 
+#define gpio_get_value_cansleep        gpio_get_value
+
 static inline void gpio_set_value(unsigned gpio, int value)
 {
        switch (bcm47xx_bus_type) {
@@ -54,6 +56,19 @@ static inline void gpio_set_value(unsigned gpio, int value)
        }
 }
 
+#define gpio_set_value_cansleep gpio_set_value
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+       return 0;
+}
+
+static inline int gpio_is_valid(unsigned gpio)
+{
+       return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
+}
+
+
 static inline int gpio_direction_input(unsigned gpio)
 {
        switch (bcm47xx_bus_type) {
@@ -137,7 +152,4 @@ static inline int gpio_polarity(unsigned gpio, int value)
 }
 
 
-/* cansleep wrappers */
-#include <asm-generic/gpio.h>
-
 #endif /* __BCM47XX_GPIO_H */
index ecea7871dec28f6e4e44e1652a2d10b87e8a87b1..d8dad5340ea30d22eac825883012a14341fbf157 100644 (file)
 #define __NR_syncfs                    (__NR_Linux + 342)
 #define __NR_sendmmsg                  (__NR_Linux + 343)
 #define __NR_setns                     (__NR_Linux + 344)
+#define __NR_process_vm_readv          (__NR_Linux + 345)
+#define __NR_process_vm_writev         (__NR_Linux + 346)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            344
+#define __NR_Linux_syscalls            346
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                344
+#define __NR_O32_Linux_syscalls                346
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_syncfs                    (__NR_Linux + 301)
 #define __NR_sendmmsg                  (__NR_Linux + 302)
 #define __NR_setns                     (__NR_Linux + 303)
+#define __NR_process_vm_readv          (__NR_Linux + 304)
+#define __NR_process_vm_writev         (__NR_Linux + 305)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            303
+#define __NR_Linux_syscalls            305
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         303
+#define __NR_64_Linux_syscalls         305
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_syncfs                    (__NR_Linux + 306)
 #define __NR_sendmmsg                  (__NR_Linux + 307)
 #define __NR_setns                     (__NR_Linux + 308)
+#define __NR_process_vm_readv          (__NR_Linux + 309)
+#define __NR_process_vm_writev         (__NR_Linux + 310)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            308
+#define __NR_Linux_syscalls            310
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                308
+#define __NR_N32_Linux_syscalls                310
 
 #ifdef __KERNEL__
 
index 98c5a9737c14d2c200a161640122f9a8af053e88..e2d8e199be323345a6feaf3eb03a738e515dd5f5 100644 (file)
@@ -103,19 +103,10 @@ static int c0_compare_int_pending(void)
 
 /*
  * Compare interrupt can be routed and latched outside the core,
- * so a single execution hazard barrier may not be enough to give
- * it time to clear as seen in the Cause register.  4 time the
- * pipeline depth seems reasonably conservative, and empirically
- * works better in configurations with high CPU/bus clock ratios.
+ * so wait up to worst case number of cycle counter ticks for timer interrupt
+ * changes to propagate to the cause register.
  */
-
-#define compare_change_hazard() \
-       do { \
-               irq_disable_hazard(); \
-               irq_disable_hazard(); \
-               irq_disable_hazard(); \
-               irq_disable_hazard(); \
-       } while (0)
+#define COMPARE_INT_SEEN_TICKS 50
 
 int c0_compare_int_usable(void)
 {
@@ -126,8 +117,12 @@ int c0_compare_int_usable(void)
         * IP7 already pending?  Try to clear it by acking the timer.
         */
        if (c0_compare_int_pending()) {
-               write_c0_compare(read_c0_count());
-               compare_change_hazard();
+               cnt = read_c0_count();
+               write_c0_compare(cnt);
+               back_to_back_c0_hazard();
+               while (read_c0_count() < (cnt  + COMPARE_INT_SEEN_TICKS))
+                       if (!c0_compare_int_pending())
+                               break;
                if (c0_compare_int_pending())
                        return 0;
        }
@@ -136,7 +131,7 @@ int c0_compare_int_usable(void)
                cnt = read_c0_count();
                cnt += delta;
                write_c0_compare(cnt);
-               compare_change_hazard();
+               back_to_back_c0_hazard();
                if ((int)(read_c0_count() - cnt) < 0)
                    break;
                /* increase delta if the timer was already expired */
@@ -145,12 +140,17 @@ int c0_compare_int_usable(void)
        while ((int)(read_c0_count() - cnt) <= 0)
                ;       /* Wait for expiry  */
 
-       compare_change_hazard();
+       while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+               if (c0_compare_int_pending())
+                       break;
        if (!c0_compare_int_pending())
                return 0;
-
-       write_c0_compare(read_c0_count());
-       compare_change_hazard();
+       cnt = read_c0_count();
+       write_c0_compare(cnt);
+       back_to_back_c0_hazard();
+       while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+               if (!c0_compare_int_pending())
+                       break;
        if (c0_compare_int_pending())
                return 0;
 
index cefc6e259bafd3acd986588af2c87a6e3ce0f32d..5426779d9fdb77700d05b897e7af3acff8d753e1 100644 (file)
@@ -7,6 +7,7 @@
  * for more details.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
 
index 4f2971bcf8e5464577885b5ecc64db11c388d295..315fc0b250f8fe3373684f12f5c0437fd3d0b7b1 100644 (file)
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
-                       return -ENOSPC;
+                       return -EINVAL;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
        memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
        if (!validate_event(&fake_cpuc, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_cpuc, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
index 47920657968d2e65368edd0c2cbd39c6a5052ef2..a632bc144efa1b9ca977a582864530e33ee039cb 100644 (file)
@@ -591,6 +591,8 @@ einval:     li      v0, -ENOSYS
        sys     sys_syncfs              1
        sys     sys_sendmmsg            4
        sys     sys_setns               2
+       sys     sys_process_vm_readv    6       /* 4345 */
+       sys     sys_process_vm_writev   6
        .endm
 
        /* We pre-compute the number of _instruction_ bytes needed to
index fb7334bea7316aedd8071ff41d63178539504cc8..3b5a5e9ae49c132640c95a87037e48ac95e5b932 100644 (file)
@@ -430,4 +430,6 @@ sys_call_table:
        PTR     sys_syncfs
        PTR     sys_sendmmsg
        PTR     sys_setns
+       PTR     sys_process_vm_readv
+       PTR     sys_process_vm_writev           /* 5305 */
        .size   sys_call_table,.-sys_call_table
index 6de1f598346e3bfd527f1d3cad321160aedb3c02..6be6f7020923f1224260a0bf4df420a2d951db7a 100644 (file)
@@ -430,4 +430,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_syncfs
        PTR     compat_sys_sendmmsg
        PTR     sys_setns
+       PTR     compat_sys_process_vm_readv
+       PTR     compat_sys_process_vm_writev    /* 6310 */
        .size   sysn32_call_table,.-sysn32_call_table
index 1d813169e453ea3709b0410521b8526638d96a98..54228553691d60903559706c00bc8a0d16a05b86 100644 (file)
@@ -548,4 +548,6 @@ sys_call_table:
        PTR     sys_syncfs
        PTR     compat_sys_sendmmsg
        PTR     sys_setns
+       PTR     compat_sys_process_vm_readv     /* 4345 */
+       PTR     compat_sys_process_vm_writev
        .size   sys_call_table,.-sys_call_table
index 261ccbc0774016aa6d055eb19dd82067f7ad65e8..5c8a49d55054dffce696066bad88d1863c5e7f35 100644 (file)
@@ -1596,7 +1596,8 @@ void __cpuinit per_cpu_trap_init(void)
        }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+       if (!cpu_data[cpu].asid_cache)
+               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
index 7e9c0ffc11a51c42bd58b1f465d54481aab6023c..77ed70fc2fe5953cd9bf31aa2514820b41e2bbd6 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
index 44a36771c819bebcb03732744566764c6d60b574..de1cb2bcd79a167e36b9602d14d6fa889e9a2b1d 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
index 56ba007bf1e59ee37d5a7a2627d603d635df447d..e34fcfd0d5ca5763983c2b0a248352275b40f49b 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/clk.h>
 #include <asm/bootinfo.h>
 #include <asm/time.h>
index 9b8af77ed0f9868baf4d1873121213cc6a8c556c..1ff6c9d6cb93eb6e21109f9a8c0a3f3941cba4a5 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <asm/bootinfo.h>
index 22d823acd536a3bcfee7708c90a593f57835d143..652258309c9c14220ebdbcaa5a43ac1faa01b7cc 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/clk.h>
 
index ddd39593c581453237b26e831716f853cd349a9d..696b1a3e06421d605097535384427ba7632e3396 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/clk.h>
 
index d0e32ab2ea074b9fa75ebd56e2c77ff1a15c994a..d614aa7ff07f48f89789694236aadf90b5f30004 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/mtd/physmap.h>
index 4278a459d6c483afd2f371fb856bff8eb1851714..cbb6ae5747b97f81c67c8d77916bd10bad34e125 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 
 #include <lantiq_soc.h>
 #include <xway_dma.h>
index a321451a54554bc0cba17a0458e61436ce62801e..d2fa98f3c78d8185d6bfec384b88e23720829b88 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/ioport.h>
index a479355abdb92755759366fa1b50325474d93810..b91c7f17f10f043ae906c322e12669438e7f562a 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
index 67d59d69034063ab37dcffa6e4f135c7a531396b..ff9991cddeaa8c3fd6e44048730b09b6d24efb1f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
index abe49f4db57f1ad670b6c6c6a6709bea6dcfe1e6..ae4959ae865c464cddd5da9393664f148fc448ca 100644 (file)
@@ -6,7 +6,7 @@
  *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/clk.h>
 #include <asm/bootinfo.h>
 #include <asm/time.h>
index 1686692ac24d52eee2338feaebd3cdf943bb22e3..2228133ca3566a0e6a192a9191625f7f2078db28 100644 (file)
@@ -6,7 +6,7 @@
  *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/clk.h>
 #include <asm/bootinfo.h>
 #include <asm/time.h>
index a1be36d0e490481b9fcdfdf6579e576ee115c111..3d41f0bb5bf73f0d84108129a979642770e632f7 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/pm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/reboot.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/nxp/pnx8550/common/pci.c b/arch/mips/nxp/pnx8550/common/pci.c
deleted file mode 100644 (file)
index 98e86dd..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <pci.h>
-#include <glb.h>
-#include <nand.h>
-
-static struct resource pci_io_resource = {
-       .start  = PNX8550_PCIIO + 0x1000,       /* reserve regacy I/O space */
-       .end    = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
-       .name   = "pci IO space",
-       .flags  = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
-       .start  = PNX8550_PCIMEM,
-       .end    = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
-       .name   = "pci memory space",
-       .flags  = IORESOURCE_MEM
-};
-
-extern struct pci_ops pnx8550_pci_ops;
-
-static struct pci_controller pnx8550_controller = {
-       .pci_ops        = &pnx8550_pci_ops,
-       .io_map_base    = PNX8550_PORT_BASE,
-       .io_resource    = &pci_io_resource,
-       .mem_resource   = &pci_mem_resource,
-};
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-static inline unsigned long get_system_mem_size(void)
-{
-       /* Read IP2031_RANK0_ADDR_LO */
-       unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
-       /* Read IP2031_RANK1_ADDR_HI */
-       unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
-       return dram_r1_hi - dram_r0_lo + 1;
-}
-
-static int __init pnx8550_pci_setup(void)
-{
-       int pci_mem_code;
-       int mem_size = get_system_mem_size() >> 20;
-
-       /* Clear the Global 2 Register, PCI Inta Output Enable Registers
-          Bit 1:Enable DAC Powerdown
-         -> 0:DACs are enabled and are working normally
-            1:DACs are powerdown
-          Bit 0:Enable of PCI inta output
-         -> 0 = Disable PCI inta output
-            1 = Enable PCI inta output
-       */
-       PNX8550_GLB2_ENAB_INTA_O = 0;
-
-       /* Calc the PCI mem size code */
-       if (mem_size >= 128)
-               pci_mem_code = SIZE_128M;
-       else if (mem_size >= 64)
-               pci_mem_code = SIZE_64M;
-       else if (mem_size >= 32)
-               pci_mem_code = SIZE_32M;
-       else
-               pci_mem_code = SIZE_16M;
-
-       /* Set PCI_XIO registers */
-       outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
-       outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
-       outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
-       outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
-
-       /* Send memory transaction via PCI_BASE2 */
-       outl(0x00000001, PCI_BASE | PCI_IO);
-
-       /* Unlock the setup register */
-       outl(0xca, PCI_BASE | PCI_UNLOCKREG);
-
-       /*
-        * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
-        * to work, and in order for bus_to_baddr to work without any
-        * hacks.
-        */
-       outl(0x00000000, PCI_BASE | PCI_BASE10);
-
-       /*
-        *These two bars are set by default or the boot code.
-        * However, it's safer to set them here so we're not boot
-        * code dependent.
-        */
-       outl(0x1be00000, PCI_BASE | PCI_BASE14);  /* PNX MMIO */
-       outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18);  /* XIO      */
-
-       outl(PCI_EN_TA |
-            PCI_EN_PCI2MMI |
-            PCI_EN_XIO |
-            PCI_SETUP_BASE18_SIZE(SIZE_32M) |
-            PCI_SETUP_BASE18_EN |
-            PCI_SETUP_BASE14_EN |
-            PCI_SETUP_BASE10_PREF |
-            PCI_SETUP_BASE10_SIZE(pci_mem_code) |
-            PCI_SETUP_CFGMANAGE_EN |
-            PCI_SETUP_PCIARB_EN,
-            PCI_BASE |
-            PCI_SETUP);        /* PCI_SETUP */
-       outl(0x00000000, PCI_BASE | PCI_CTRL);  /* PCI_CONTROL */
-
-       register_pci_controller(&pnx8550_controller);
-
-       return 0;
-}
-
-arch_initcall(pnx8550_pci_setup);
diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c
deleted file mode 100644 (file)
index 71adac3..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- *  Based on Per Hallsmark, per.hallsmark@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/pm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-#include <asm/time.h>
-
-#include <glb.h>
-#include <int.h>
-#include <pci.h>
-#include <uart.h>
-#include <nand.h>
-
-extern void __init board_setup(void);
-extern void pnx8550_machine_restart(char *);
-extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
-extern struct resource ioport_resource;
-extern struct resource iomem_resource;
-extern char *prom_getcmdline(void);
-
-struct resource standard_io_resources[] = {
-       {
-               .start  = 0x00,
-               .end    = 0x1f,
-               .name   = "dma1",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x40,
-               .end    = 0x5f,
-               .name   = "timer",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x80,
-               .end    = 0x8f,
-               .name   = "dma page reg",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0xc0,
-               .end    = 0xdf,
-               .name   = "dma2",
-               .flags  = IORESOURCE_BUSY
-       },
-};
-
-#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
-
-extern struct resource pci_io_resource;
-extern struct resource pci_mem_resource;
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-unsigned long get_system_mem_size(void)
-{
-       /* Read IP2031_RANK0_ADDR_LO */
-       unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
-       /* Read IP2031_RANK1_ADDR_HI */
-       unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
-       return dram_r1_hi - dram_r0_lo + 1;
-}
-
-int pnx8550_console_port = -1;
-
-void __init plat_mem_setup(void)
-{
-       int i;
-       char* argptr;
-
-       board_setup();  /* board specific setup */
-
-        _machine_restart = pnx8550_machine_restart;
-        _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
-
-       /* Clear the Global 2 Register, PCI Inta Output Enable Registers
-          Bit 1:Enable DAC Powerdown
-         -> 0:DACs are enabled and are working normally
-            1:DACs are powerdown
-          Bit 0:Enable of PCI inta output
-         -> 0 = Disable PCI inta output
-            1 = Enable PCI inta output
-       */
-       PNX8550_GLB2_ENAB_INTA_O = 0;
-
-       /* IO/MEM resources. */
-       set_io_port_base(PNX8550_PORT_BASE);
-       ioport_resource.start = 0;
-       ioport_resource.end = ~0;
-       iomem_resource.start = 0;
-       iomem_resource.end = ~0;
-
-       /* Request I/O space for devices on this board */
-       for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-               request_resource(&ioport_resource, standard_io_resources + i);
-
-       /* Place the Mode Control bit for GPIO pin 16 in primary function */
-       /* Pin 16 is used by UART1, UA1_TX                                */
-       outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
-                       (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
-                       PNX8550_GPIO_MC1);
-
-       argptr = prom_getcmdline();
-       if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
-               argptr += strlen("console=ttyS");
-               pnx8550_console_port = *argptr == '0' ? 0 : 1;
-
-               /* We must initialize the UART (console) before early printk */
-               /* Set LCR to 8-bit and BAUD to 38400 (no 5)                */
-               ip3106_lcr(UART_BASE, pnx8550_console_port) =
-                       PNX8XXX_UART_LCR_8BIT;
-               ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
-       }
-}
index 4ee57104e47bb78ef4b8957480e04de5ee8198b9..b5ce041cdafb54667b7c11738892ab7e9020000a 100644 (file)
@@ -7,6 +7,7 @@
  * Support for all devices (greater than 16) added by David Gathright.
  */
 
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
index 8656388b34bd4fac95d7dadf6584d9fe6cdbceb6..be1e1afe12c3ce3aa527b4142d8f8b7aee17143e 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 
 #include <asm/pci.h>
index cf4c868715acaaba37e9aec317d0224ca5b6d060..dcc926e06fcec573a4478b2359b2218d5ca942cf 100644 (file)
@@ -102,7 +102,7 @@ void __init prom_init(void)
 
        /* Get the boot parameters */
        for (i = 1; i < argc; i++) {
-               if (strlen(arcs_cmdline) + strlen(arg[i] + 1) >=
+               if (strlen(arcs_cmdline) + strlen(arg[i]) + 1 >=
                    sizeof(arcs_cmdline))
                        break;
 
index b177caa56d95c5880f9a7d0732f1736dbb908598..951e18f5335b268965d3880881d900bed63e7356 100644 (file)
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
        bool "kexec system call (EXPERIMENTAL)"
-       depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL
+       depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 57af16edc19231ad0b4511f30174607a57711494..70ba0c0a1223d475cd2d2c2cdfddbf5b901a7527 100644 (file)
@@ -255,12 +255,6 @@ checkbin:
                echo 'disable kernel modules' ; \
                false ; \
        fi
-       @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
-               echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
-               echo 'correctly with old versions of binutils.' ; \
-               echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
-               false ; \
-       fi
 
 CLEAN_FILES += $(TOUT)
 
index d9b776740a6739707d26a9ad836a39533bc8718c..d3b478242ea9586b793f9def431a6d9a4ea00c80 100644 (file)
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /* IRQ[0:3] are pulled up on board, set to active-low */
                        interrupt-map = <
                                /* IDSEL 0x0 */
                                0000 0 0 1 &mpic 0 1
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /*
+                        * IRQ[4:6] only for PCIe, set to active-high,
+                        * IRQ[7] is pulled up on board, set to active-low
+                        */
                        interrupt-map = <
                                /* IDSEL 0x0 */
-                               0000 0 0 1 &mpic 4 1
-                               0000 0 0 2 &mpic 5 1
-                               0000 0 0 3 &mpic 6 1
+                               0000 0 0 1 &mpic 4 2
+                               0000 0 0 2 &mpic 5 2
+                               0000 0 0 3 &mpic 6 2
                                0000 0 0 4 &mpic 7 1
                                >;
                        ranges = <0x2000000 0x0 0xa0000000
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /*
+                        * IRQ[8:10] are pulled up on board, set to active-low
+                        * IRQ[11] only for PCIe, set to active-high,
+                        */
                        interrupt-map = <
                                /* IDSEL 0x0 */
                                0000 0 0 1 &mpic 8 1
                                0000 0 0 2 &mpic 9 1
                                0000 0 0 3 &mpic 10 1
-                               0000 0 0 4 &mpic 11 1
+                               0000 0 0 4 &mpic 11 2
                                >;
                        ranges = <0x2000000 0x0 0x80000000
                                  0x2000000 0x0 0x80000000
index 9452c3c05114e523033eebb278d7f78811890a87..d918752b12005d81c60a7ffb37a73db94dfaf055 100644 (file)
                ranges = <
                        0 0x0 0xfc000000 0x04000000     // NOR FLASH bank 1
                        1 0x0 0xf8000000 0x08000000     // NOR FLASH bank 0
-                       2 0x0 0xa3000000 0x00008000     // CAN (2 x i82527)
+                       2 0x0 0xa3000000 0x00008000     // CAN (2 x CC770)
                        3 0x0 0xa3010000 0x00008000     // NAND FLASH
 
                >;
                };
 
                /* Note: CAN support needs be enabled in U-Boot */
-               can0@2,0 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,0 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x0 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
                };
 
-               can1@2,100 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,100 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x100 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
                };
 
                /* Note: NAND support needs to be enabled in U-Boot */
index 619776f72c904c611e9507d44db4bee1200e6688..988d887c97fc1d973db78cdd0064340fbf475816 100644 (file)
                ranges = <
                        0 0x0 0xfc000000 0x04000000     // NOR FLASH bank 1
                        1 0x0 0xf8000000 0x08000000     // NOR FLASH bank 0
-                       2 0x0 0xe3000000 0x00008000     // CAN (2 x i82527)
+                       2 0x0 0xe3000000 0x00008000     // CAN (2 x CC770)
                        3 0x0 0xe3010000 0x00008000     // NAND FLASH
 
                >;
                };
 
                /* Note: CAN support needs be enabled in U-Boot */
-               can0@2,0 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,0 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x0 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
                };
 
-               can1@2,100 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,100 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x100 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
                };
 
                /* Note: NAND support needs to be enabled in U-Boot */
index f6da7ec49a8e8b1e259f82b023c27787cc185847..c3dba2518d8cb06ea219906dfb6dae0e9959de2f 100644 (file)
@@ -57,6 +57,7 @@
 
                ranges = <
                        0x0 0x0 0x40000000 0x800000
+                       0x3 0x0 0xc0000000 0x200
                >;
 
                flash@0,0 {
                        bank-width = <4>;
                        device-width = <2>;
                };
+
+               /* Note: CAN support needs be enabled in U-Boot */
+               can@3,0 {
+                       compatible = "intc,82527";
+                       reg = <3 0x0 0x80>;
+                       interrupts = <8 1>;
+                       interrupt-parent = <&PIC>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
+               };
+
+               can@3,100 {
+                       compatible = "intc,82527";
+                       reg = <3 0x100 0x80>;
+                       interrupts = <8 1>;
+                       interrupt-parent = <&PIC>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+               };
        };
 
        soc@fff00000 {
index 6cdf1c0d2c8a10acc43796c20af80d52f7b78d64..3b98d735434142bb9240a6a2d4906d0b882a3ab2 100644 (file)
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NDFC=m
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_PROC_DEVICETREE=y
index e2a4c26ad37793874dce3902d561dd749c78681d..02e41b53488d8d8574f42c6a4e0ce1f99df9aaa8 100644 (file)
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        int t;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%1         # __atomic_add_unless\n\
        cmpw    0,%0,%3 \n\
        beq-    2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%1 \n\
        bne-    1b \n"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
 "      subf    %0,%2,%0 \n\
 2:"
        : "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        cmpwi   %0,1\n\
        addi    %0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"    : "=&b" (t)
        : "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2         # atomic64_add_return\n\
        add     %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2         # atomic64_sub_return\n\
        subf    %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%1         # atomic64_inc_return\n\
        addic   %0,%0,1\n\
        stdcx.  %0,0,%1 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%1         # atomic64_dec_return\n\
        addic   %0,%0,-1\n\
        stdcx.  %0,0,%1\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stdcx.  %0,0,%1\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"    : "=&r" (t)
        : "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
        long t;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%1         # __atomic_add_unless\n\
        cmpd    0,%0,%3 \n\
        beq-    2f \n\
        add     %0,%2,%0 \n"
 "      stdcx.  %0,0,%1 \n\
        bne-    1b \n"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
 "      subf    %0,%2,%0 \n\
 2:"
        : "=&r" (t)
index e137afcc10fae89cb0987bbe982af5653916e9e1..efdc92618b38ddfa7da075a53fb3ba93e9c71dae 100644 (file)
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn(                       \
        return (old & mask);                            \
 }
 
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
-             PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+             PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
              PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
-             PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
-             PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+             PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+             PPC_ATOMIC_EXIT_BARRIER, 0)
 
 static __inline__ int test_and_set_bit(unsigned long nr,
                                       volatile unsigned long *addr)
index 1cf20bdfbecaada5bb2b88d439d776c32af2558c..98b7c4b49c9d0586cce4d7132593ee359c3fd803 100644 (file)
@@ -150,6 +150,8 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
        return ct;
 }
 
+#define usecs_to_cputime64(us)         usecs_to_cputime(us)
+
 /*
  * Convert cputime <-> seconds
  */
index c94e4a3fe2ef3de09decfd1aa47bda3b77342f53..2a9cf845473bb51a4aea7d5317dde0a4b5ab37ed 100644 (file)
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
-       PPC_RELEASE_BARRIER \
+       PPC_ATOMIC_ENTRY_BARRIER \
 "1:    lwarx   %0,0,%2\n" \
        insn \
        PPC405_ERR77(0, %2) \
 "2:    stwcx.  %1,0,%2\n" \
        "bne-   1b\n" \
+       PPC_ATOMIC_EXIT_BARRIER \
        "li     %1,0\n" \
 "3:    .section .fixup,\"ax\"\n" \
 "4:    li      %1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
         __asm__ __volatile__ (
-        PPC_RELEASE_BARRIER
+        PPC_ATOMIC_ENTRY_BARRIER
 "1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
         cmpw    0,%1,%4\n\
         bne-    3f\n"
         PPC405_ERR77(0,%3)
 "2:     stwcx.  %5,0,%3\n\
         bne-    1b\n"
-        PPC_ACQUIRE_BARRIER
+        PPC_ATOMIC_EXIT_BARRIER
 "3:    .section .fixup,\"ax\"\n\
 4:     li      %0,%6\n\
        b       3b\n\
index 08fe69edcd103f7882a3e172b7c5a55764009550..0ad432bc81d66259d82e4e94f691c46e4c765555 100644 (file)
@@ -148,12 +148,6 @@ struct kvm_regs {
 #define KVM_SREGS_E_UPDATE_DEC         (1 << 2)
 #define KVM_SREGS_E_UPDATE_DBSR                (1 << 3)
 
-/*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR               (1 << 0)
-
 /*
  * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
  * previous KVM_GET_REGS.
@@ -179,8 +173,6 @@ struct kvm_sregs {
                                __u64 ibat[8]; 
                                __u64 dbat[8]; 
                        } ppc32;
-                       __u64 flags; /* KVM_SREGS_S_ */
-                       __u64 hior;
                } s;
                struct {
                        union {
index a384ffdf33de0af850ca03f18aab3e6c36515540..69c7377d2071aa821fd1ff9529cafd4e49d10081 100644 (file)
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
 #endif
        int context_id[SID_CONTEXTS];
 
-       bool hior_sregs;                /* HIOR is set by SREGS, not PVR */
-
        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
@@ -383,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
-                                            unsigned long pte_index)
-{
-       unsigned long rb, va_low;
-
-       rb = (v & ~0x7fUL) << 16;               /* AVA field */
-       va_low = pte_index >> 3;
-       if (v & HPTE_V_SECONDARY)
-               va_low = ~va_low;
-       /* xor vsid from AVA */
-       if (!(v & HPTE_V_1TB_SEG))
-               va_low ^= v >> 12;
-       else
-               va_low ^= v >> 24;
-       va_low &= 0x7ff;
-       if (v & HPTE_V_LARGE) {
-               rb |= 1;                        /* L field */
-               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-                   (r & 0xff000)) {
-                       /* non-16MB large page, must be 64k */
-                       /* (masks depend on page size) */
-                       rb |= 0x1000;           /* page encoding in LP field */
-                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
-               }
-       } else {
-               /* 4kB page */
-               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
-       }
-       rb |= (v >> 54) & 0x300;                /* B field */
-       return rb;
-}
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3                        0x113724FA
index e43fe42b9875308b49e13faa8a67ef91fe125200..d0ac94f98f9e3a2dcca21ff90631b841e4ab5060 100644 (file)
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
 
 #define SPAPR_TCE_SHIFT                12
 
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+                                            unsigned long pte_index)
+{
+       unsigned long rb, va_low;
+
+       rb = (v & ~0x7fUL) << 16;               /* AVA field */
+       va_low = pte_index >> 3;
+       if (v & HPTE_V_SECONDARY)
+               va_low = ~va_low;
+       /* xor vsid from AVA */
+       if (!(v & HPTE_V_1TB_SEG))
+               va_low ^= v >> 12;
+       else
+               va_low ^= v >> 24;
+       va_low &= 0x7ff;
+       if (v & HPTE_V_LARGE) {
+               rb |= 1;                        /* L field */
+               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+                   (r & 0xff000)) {
+                       /* non-16MB large page, must be 64k */
+                       /* (masks depend on page size) */
+                       rb |= 0x1000;           /* page encoding in LP field */
+                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
+               }
+       } else {
+               /* 4kB page */
+               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
+       }
+       rb |= (v >> 54) & 0x300;                /* B field */
+       return rb;
+}
+
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
index 28cdbd9f399c70fb5b9a9adf80863ca9758e390f..03c48e819c8e3ff3bc600242dc85ef9111aca478 100644 (file)
@@ -31,7 +31,7 @@
 
 #define MSR_           MSR_ME | MSR_CE
 #define MSR_KERNEL     MSR_ | MSR_64BIT
-#define MSR_USER32     MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32     MSR_ | MSR_PR | MSR_EE
 #define MSR_USER64     MSR_USER32 | MSR_64BIT
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL     (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
index 6fbce725c710f97453079205e190c02c7365cd0b..a0f358d4a00cd57ba3b22fbbf1024c7daba65fa4 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
index d7cab44643c51d90f1f79509939e3c734b735eba..e682a7143edb767826705243df3613cc85e305ab 100644 (file)
@@ -13,6 +13,7 @@
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
                             void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
@@ -41,11 +42,15 @@ static inline void isync(void)
        START_LWSYNC_SECTION(97);                       \
        isync;                                          \
        MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
-#define PPC_ACQUIRE_BARRIER    "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
-#define PPC_RELEASE_BARRIER    stringify_in_c(LWSYNC) "\n"
+#define PPC_ACQUIRE_BARRIER     "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER     stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER         "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
 #define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
 #endif
 
 #endif /* __KERNEL__ */
index 56212bc0ab087c4b40bb21df3496c89849a01f06..4f80cf1ce77b84c30e905bcb8cb8d660c005c22b 100644 (file)
@@ -215,7 +215,22 @@ reenable_mmu:                              /* re-enable mmu so we can */
        stw     r9,8(r1)
        stw     r11,12(r1)
        stw     r3,ORIG_GPR3(r1)
+       /*
+        * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
+        * If from user mode there is only one stack frame on the stack, and
+        * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
+        * stack frame to make trace_hardirqs_off happy.
+        */
+       andi.   r12,r12,MSR_PR
+       beq     11f
+       stwu    r1,-16(r1)
+       bl      trace_hardirqs_off
+       addi    r1,r1,16
+       b       12f
+
+11:
        bl      trace_hardirqs_off
+12:
        lwz     r0,GPR0(r1)
        lwz     r3,ORIG_GPR3(r1)
        lwz     r4,GPR4(r1)
index 368d158d665d5e44942f403d5a3710413ece52d2..a1ed8a8c7cb42c83fc61c674735f1f7d6bbe3842 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/jump_label.h>
 #include <asm/code-patching.h>
 
+#ifdef HAVE_JUMP_LABEL
 void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
 {
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
        else
                patch_instruction(addr, PPC_INST_NOP);
 }
+#endif
index 35f27646c4ff5af053313054c953000d0be64031..2985338d0e10164e3b77ab12fcc3bb755a1a6fb3 100644 (file)
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
        /* On relocatable kernels interrupts handlers and our code
           can be in different regions, so we don't patch them */
 
-       extern u32 __end_interrupts;
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
 #endif
index f7d760ab5ca1fd6851502352a7a958759c429c7f..7cd07b42ca1a505c9a9bfbb3802277336537a514 100644 (file)
@@ -738,7 +738,7 @@ relocate_new_kernel:
        mr      r5, r31
 
        li      r0, 0
-#elif defined(CONFIG_44x)  && !defined(CONFIG_47x)
+#elif defined(CONFIG_44x)  && !defined(CONFIG_PPC_47x)
 
 /*
  * Code for setting up 1:1 mapping for PPC440x for KEXEC
index 9054ca9ab4f93bcd6100cc0fcb03c7d5f4caf857..6457574c0b2f32fbaa800b11eeed0cbd8877a77e 100644 (file)
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
        new_thread = &new->thread;
        old_thread = &current->thread;
 
-#if defined(CONFIG_PPC_BOOK3E_64)
-       /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
-        * we always hold the user values, so we set it now.
-        *
-        * However, we ensure the kernel MSR:DE is appropriately cleared too
-        * to avoid spurrious single step exceptions in the kernel.
-        *
-        * This will have to change to merge with the ppc32 code at some point,
-        * but I don't like much what ppc32 is doing today so there's some
-        * thinking needed there
-        */
-       if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
-               u32 dbcr0;
-
-               mtmsr(mfmsr() & ~MSR_DE);
-               isync();
-               dbcr0 = mfspr(SPRN_DBCR0);
-               dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
-               mtspr(SPRN_DBCR0, dbcr0);
-       }
-#endif /* CONFIG_PPC64_BOOK3E */
-
 #ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
@@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs)
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
index b4fa66127495286aa6d4e7030480492f2b6bf90b..cc584865b3df537d7c50cfb2bda9d87a6ed24f85 100644 (file)
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
                return;
 
        base = alloc_down(size, PAGE_SIZE, 0);
-       if (base == 0) {
-               prom_printf("RTAS allocation failed !\n");
-               return;
-       }
+       if (base == 0)
+               prom_panic("Could not allocate memory for RTAS\n");
 
        rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
        if (!IHANDLE_VALID(rtas_inst)) {
index c1ce86357ecb4a3a47411c18e6fe88cfd5c07d8b..ac761081511355ee68854ee02ade12f17ef93e18 100644 (file)
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
                         PTRRELOC(&__start___lwsync_fixup),
                         PTRRELOC(&__stop___lwsync_fixup));
 
+       do_final_fixups();
+
        return KERNELBASE + offset;
 }
 
index 1a9dea80a69b46a42b9df6ed4ce4d45f385abb85..fb9bb46e7e881a584c1532a6768622395a5045be 100644 (file)
@@ -359,6 +359,7 @@ void __init setup_system(void)
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
        do_lwsync_fixups(cur_cpu_spec->cpu_features,
                         &__start___lwsync_fixup, &__stop___lwsync_fixup);
+       do_final_fixups();
 
        /*
         * Unflatten the device-tree passed by prom_init or kexec
index 78b76dc54dfb27847a24228e1bac2e2ef804354a..836a5a19eb2c3a3e45d5cab542fdb7138a8142bd 100644 (file)
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
        compat_sigset_t cset;
 
        switch (_NSIG_WORDS) {
-       case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
+       case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
                cset.sig[7] = set->sig[3] >> 32;
        case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
                cset.sig[5] = set->sig[2] >> 32;
index 4e5908264d1a98819a6713fe23f4d5a23032db41..5459d148a0f6d792fd457bd55e2105788057169b 100644 (file)
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 
                if (user_mode(regs)) {
                        current->thread.dbcr0 &= ~DBCR0_IC;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                        if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
                                               current->thread.dbcr1))
                                regs->msr |= MSR_DE;
                        else
                                /* Make sure the IDM bit is off */
                                current->thread.dbcr0 &= ~DBCR0_IDM;
-#endif
                }
 
                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
index 0cdbc07cec14e1cd9900a5b1eddc62e476dcdf98..336983da9e726c1e144b9a29be03934308800958 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/processor.h>
 #include <asm/cputhreads.h>
 #include <asm/page.h>
+#include <asm/hvcall.h>
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
@@ -537,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
        tpaca->kvm_hstate.napping = 0;
        vcpu->cpu = vc->pcpu;
        smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (vcpu->arch.ptid) {
                tpaca->cpu_start = 0x80;
                wmb();
index bc4d50dec78b8f420f789d722c38c66d42a87ab6..e2cfb9e1e20ebdea21c726e946e1f37aef1c8c6f 100644 (file)
@@ -151,16 +151,14 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 #ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
-               if (!to_book3s(vcpu)->hior_sregs)
-                       to_book3s(vcpu)->hior = 0xfff00000;
+               to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
 #endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
-               if (!to_book3s(vcpu)->hior_sregs)
-                       to_book3s(vcpu)->hior = 0;
+               to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }
@@ -660,10 +658,12 @@ program_interrupt:
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;
 
+#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
+#endif
 
                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
@@ -797,9 +797,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                }
        }
 
-       if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
-               sregs->u.s.hior = to_book3s(vcpu)->hior;
-
        return 0;
 }
 
@@ -836,11 +833,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-       if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
-               to_book3s(vcpu)->hior_sregs = true;
-               to_book3s(vcpu)->hior = sregs->u.s.hior;
-       }
-
        return 0;
 }
 
index 26d20903f2bc5c8cce1a94637f58ea9dea73a696..8c0d45a6faf7f49db9aef9c9a495c1ef20d155d8 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
index efbf9ad872035dee6a364867c5e5e50cdd52da99..607fbdf24b8484c173cc03593ffc71d968b689fe 100644 (file)
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PPC_BOOKE_SREGS:
 #else
        case KVM_CAP_PPC_SEGSTATE:
-       case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
 #endif
        case KVM_CAP_PPC_UNSET_IRQ:
index 0d08d0171392a4e7ec72e21226737cc458e3a98f..7a8a7487cee8dde9d06aa86fff3bd32bdd54433e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
 
 
 struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
        }
 }
 
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+       int *src, *dest;
+       unsigned long length;
+
+       if (PHYSICAL_START == 0)
+               return;
+
+       src = (int *)(KERNELBASE + PHYSICAL_START);
+       dest = (int *)KERNELBASE;
+       length = (__end_interrupts - _stext) / sizeof(int);
+
+       while (length--) {
+               patch_instruction(dest, *src);
+               src++;
+               dest++;
+       }
+#endif
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)       \
index 5964371303ac4a5941eb2a0ccceca27cf6330354..8558b572e55d3e6ba2ee7b5656b9eb9f9e39b5c3 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of_fdt.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
+#include <linux/moduleparam.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
index 45023e26aea369685d88c752aa52814ac07dd27a..d7946be298b6fc1f842eaff3e14b4ae502924f3c 100644 (file)
@@ -203,7 +203,7 @@ config P3060_QDS
        select PPC_E500MC
        select PHYS_64BIT
        select SWIOTLB
-       select MPC8xxx_GPIO
+       select GPIO_MPC8XXX
        select HAS_RAPIDIO
        select PPC_EPAPR_HV_PIC
        help
index 01dcf44871e9a4c79273d14efd1fcde0b60b6ba9..081cf4ac188161b4f4bb535d0d57209bc14fd03c 100644 (file)
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
        .power_save             = e500_idle,
 };
 
-machine_device_initcall(p3060_qds, declare_of_platform_devices);
+machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
 
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
index e4588721ef344da86b4cc001179e2e778bbde4cd..3fe6d927ad70e928d302192d275fd0b69220af05 100644 (file)
@@ -347,7 +347,7 @@ config SIMPLE_GPIO
 
 config MCU_MPC8349EMITX
        bool "MPC8349E-mITX MCU driver"
-       depends on I2C && PPC_83xx
+       depends on I2C=y && PPC_83xx
        select GENERIC_GPIO
        select ARCH_REQUIRE_GPIOLIB
        help
index 404bc52b7806e1f769455439626bb62ac0afbcf9..1d6f4f478fe293ebd06313aa1077c1019507473d 100644 (file)
@@ -88,6 +88,7 @@ struct ps3_private {
        struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
        u64 ppe_id;
        u64 thread_id;
+       unsigned long ipi_mask;
 };
 
 static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
 static void ps3_chip_eoi(struct irq_data *d)
 {
        const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
-       lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
+
+       /* non-IPIs are EOIed here. */
+
+       if (!test_bit(63 - d->irq, &pd->ipi_mask))
+               lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
 }
 
 /**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
                cpu, virq, pd->bmp.ipi_debug_brk_mask);
 }
 
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
+{
+       struct ps3_private *pd = &per_cpu(ps3_private, cpu);
+
+       set_bit(63 - virq, &pd->ipi_mask);
+
+       DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
+               cpu, virq, pd->ipi_mask);
+}
+
 static unsigned int ps3_get_irq(void)
 {
        struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
                BUG();
        }
 #endif
+
+       /* IPIs are EOIed here. */
+
+       if (test_bit(63 - plug, &pd->ipi_mask))
+               lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
+
        return plug;
 }
 
index 9a196a88eda794d6d07f50abefaebc7a21902427..1a633ed0fe98744d8994d7e6367180a11ab59a96 100644 (file)
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
 void ps3_init_IRQ(void);
 void ps3_shutdown_IRQ(int cpu);
 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
 
 /* smp */
 
index 4c44794faac0b344ac50663ed12122519c37b53d..efc1cd8c034ac7f47135f7af1e1582d9e722984b 100644 (file)
@@ -59,46 +59,49 @@ static void ps3_smp_message_pass(int cpu, int msg)
 
 static int ps3_smp_probe(void)
 {
-       return 2;
-}
+       int cpu;
 
-static void __init ps3_smp_setup_cpu(int cpu)
-{
-       int result;
-       unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
-       int i;
+       for (cpu = 0; cpu < 2; cpu++) {
+               int result;
+               unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
+               int i;
 
-       DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+               DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
 
-       /*
-        * Check assumptions on ps3_ipi_virqs[] indexing. If this
-        * check fails, then a different mapping of PPC_MSG_
-        * to index needs to be setup.
-        */
+               /*
+               * Check assumptions on ps3_ipi_virqs[] indexing. If this
+               * check fails, then a different mapping of PPC_MSG_
+               * to index needs to be setup.
+               */
 
-       BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION    != 0);
-       BUILD_BUG_ON(PPC_MSG_RESCHEDULE       != 1);
-       BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
-       BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK   != 3);
+               BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION    != 0);
+               BUILD_BUG_ON(PPC_MSG_RESCHEDULE       != 1);
+               BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+               BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK   != 3);
 
-       for (i = 0; i < MSG_COUNT; i++) {
-               result = ps3_event_receive_port_setup(cpu, &virqs[i]);
+               for (i = 0; i < MSG_COUNT; i++) {
+                       result = ps3_event_receive_port_setup(cpu, &virqs[i]);
 
-               if (result)
-                       continue;
+                       if (result)
+                               continue;
 
-               DBG("%s:%d: (%d, %d) => virq %u\n",
-                       __func__, __LINE__, cpu, i, virqs[i]);
+                       DBG("%s:%d: (%d, %d) => virq %u\n",
+                               __func__, __LINE__, cpu, i, virqs[i]);
 
-               result = smp_request_message_ipi(virqs[i], i);
+                       result = smp_request_message_ipi(virqs[i], i);
 
-               if (result)
-                       virqs[i] = NO_IRQ;
-       }
+                       if (result)
+                               virqs[i] = NO_IRQ;
+                       else
+                               ps3_register_ipi_irq(cpu, virqs[i]);
+               }
 
-       ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+               ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
 
-       DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+               DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+       }
+
+       return 2;
 }
 
 void ps3_smp_cleanup_cpu(int cpu)
@@ -121,7 +124,6 @@ static struct smp_ops_t ps3_smp_ops = {
        .probe          = ps3_smp_probe,
        .message_pass   = ps3_smp_message_pass,
        .kick_cpu       = smp_generic_kick_cpu,
-       .setup_cpu      = ps3_smp_setup_cpu,
 };
 
 void smp_init_ps3(void)
index af1a5df46b3e54ff75bb392b97ff26e8f1e72b4f..b6731e4a6646e16e81c62de273d8a6cd579b1ca7 100644 (file)
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
 
        if (!ehv_pic->irqhost) {
                of_node_put(np);
+               kfree(ehv_pic);
                return;
        }
 
index c4d96fa32ba557a68512b133400fef585e799eb4..d5c3c90ee6981a14d40da0175f3d3ddc0c0f77f4 100644 (file)
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
 err:
        iounmap(fsl_lbc_ctrl_dev->regs);
        kfree(fsl_lbc_ctrl_dev);
+       fsl_lbc_ctrl_dev = NULL;
        return ret;
 }
 
index 3363fbc964f86c1d42676116f813a2ac71b9e0e2..ceb09cbd2329e782fe00a5c3e1326d217ab3e92b 100644 (file)
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
        /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
           that the BRG divisor must be even if you're not using divide-by-16
           mode. */
-       if (!div16 && (divisor & 1))
+       if (!div16 && (divisor & 1) && (divisor > 3))
                divisor++;
 
        tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
index a9fbd43395f71814d85a284a63900dc5a7dcc018..373679b3744a7992c7009281c1747b3aeb38157a 100644 (file)
@@ -572,6 +572,7 @@ config KEXEC
 config CRASH_DUMP
        bool "kernel crash dumps"
        depends on 64BIT
+       select KEXEC
        help
          Generate crash dump after being started by kexec.
          Crash dump kernels are loaded in the main kernel with kexec-tools
index 49676771bd66a278e73db7a2430612a57a1cf6a1..ffd1ac255f19444d838e58b2fb2ef2b716b02f77 100644 (file)
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
 
        if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
                return 0;
-       if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+
+       if (facility_mask & CRYPT_S390_MSA3 &&
+           (!test_facility(2) || !test_facility(76)))
                return 0;
-       if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
+       if (facility_mask & CRYPT_S390_MSA4 &&
+           (!test_facility(2) || !test_facility(77)))
                return 0;
 
        switch (func & CRYPT_S390_OP_MASK) {
index 0814348782966ac99c571990c89975d06aedffe1..b9acaaa175d8af35a25ba42d8157f5f5bec299df 100644 (file)
@@ -87,6 +87,8 @@ usecs_to_cputime(const unsigned int m)
        return (cputime_t) m * 4096;
 }
 
+#define usecs_to_cputime64(m)          usecs_to_cputime(m)
+
 /*
  * Convert cputime to milliseconds and back.
  */
index 24e18473d926548ec3e54732093902178a6838e9..b0c235cb6ad5c79d42414a863508e0c37db8337d 100644 (file)
@@ -47,7 +47,7 @@ struct sca_block {
 #define KVM_HPAGE_MASK(x)      (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define CPUSTAT_HOST       0x80000000
+#define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
 #define CPUSTAT_STOP_INT   0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
        u32 instruction_stfl;
        u32 instruction_tprot;
        u32 instruction_sigp_sense;
+       u32 instruction_sigp_sense_running;
        u32 instruction_sigp_external_call;
        u32 instruction_sigp_emergency;
        u32 instruction_sigp_stop;
index 34ede0ea85a9d0ae6d03a8c52b3bb3f43cb4e1ac..4f289ff0b7fe27b7de54d8886b6aee0b8890066f 100644 (file)
@@ -593,14 +593,16 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
        unsigned long address, bits;
        unsigned char skey;
 
+       if (!pte_present(*ptep))
+               return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
-       if (bits) {
-               skey ^= bits;
-               page_set_storage_key(address, skey, 1);
-       }
+       if (bits & _PAGE_CHANGED)
+               page_set_storage_key(address, skey ^ bits, 1);
+       else if (bits)
+               page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* RCP_GR_BIT & RCP_GC_BIT */
        /* Get host changed & referenced bits from pgste */
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
        int young;
 
+       if (!pte_present(*ptep))
+               return pgste;
        young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
        /* Transfer page referenced bit to pte software bit (host view) */
        if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
+static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long okey, nkey;
 
-       address = pte_val(*ptep) & PAGE_MASK;
+       if (!pte_present(entry))
+               return;
+       address = pte_val(entry) & PAGE_MASK;
        okey = nkey = page_get_storage_key(address);
        nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
        /* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste_set_pte(ptep, pgste);
+               pgste_set_pte(ptep, pgste, entry);
                *ptep = entry;
                pgste_set_unlock(ptep, pgste);
        } else
index 5a099714df0459b9470e3d158e0b163bf8ce770b..097183c70407a81e9147697a37c880a6008b4ed1 100644 (file)
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_LPAR      (1UL << 12)
 #define MACHINE_FLAG_SPP       (1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY  (1UL << 14)
+#define MACHINE_FLAG_STCKF     (1UL << 15)
 
 #define MACHINE_IS_VM          (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM         (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (0)
 #define MACHINE_HAS_SPP                (0)
 #define MACHINE_HAS_TOPOLOGY   (0)
+#define MACHINE_HAS_STCKF      (0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE       (1)
 #define MACHINE_HAS_CSP                (1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP                (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY   (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_STCKF      (S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE      (32UL<<20)
index d610bef9c5e91a425ad01a683525f46ba0c055e1..c447a27a7fdb109be95edf0d6884026df6e895ce 100644 (file)
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
 {
        unsigned long long clk;
 
-       if (test_facility(25))
+       if (MACHINE_HAS_STCKF)
                asm volatile(".insn     s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
        else
                clk = get_clock();
index 404bdb9671b4f5c8d03d2391e93abf26e5b38a59..58de4c91c333358000779b283efdad2de1ebe445 100644 (file)
 #define __NR_clock_adjtime     337
 #define __NR_syncfs            338
 #define __NR_setns             339
-#define NR_syscalls 340
+#define __NR_process_vm_readv  340
+#define __NR_process_vm_writev 341
+#define NR_syscalls 342
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 5006a1d9f5d0ed6d3abd95f3cca652d87689dcef..18c51df9fe06c5ed98f785fb6af86655c025e887 100644 (file)
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
        lgfr    %r2,%r2                 # int
        lgfr    %r3,%r3                 # int
        jg      sys_setns
+
+ENTRY(compat_sys_process_vm_readv_wrapper)
+       lgfr    %r2,%r2                 # compat_pid_t
+       llgtr   %r3,%r3                 # struct compat_iovec __user *
+       llgfr   %r4,%r4                 # unsigned long
+       llgtr   %r5,%r5                 # struct compat_iovec __user *
+       llgfr   %r6,%r6                 # unsigned long
+       llgf    %r0,164(%r15)           # unsigned long
+       stg     %r0,160(%r15)
+       jg      sys_process_vm_readv
+
+ENTRY(compat_sys_process_vm_writev_wrapper)
+       lgfr    %r2,%r2                 # compat_pid_t
+       llgtr   %r3,%r3                 # struct compat_iovec __user *
+       llgfr   %r4,%r4                 # unsigned long
+       llgtr   %r5,%r5                 # struct compat_iovec __user *
+       llgfr   %r6,%r6                 # unsigned long
+       llgf    %r0,164(%r15)           # unsigned long
+       stg     %r0,160(%r15)
+       jg      sys_process_vm_writev
index 37394b3413e2776dfd24586f0578045e488bd054..c9ffe002519715d64ddc16d2a2caafcbf47cb8b2 100644 (file)
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
        if (test_facility(40))
                S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+       if (test_facility(25))
+               S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
 #endif
 }
 
index 450931a45b684b2044106921a66a4702e1eb93f1..573bc29551ef471fee58b89d02df0ea0ff956503 100644 (file)
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
                        /* Invalid psw mask. */
                        return -EINVAL;
-               if (addr == (addr_t) &dummy->regs.psw.addr)
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
-
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_last_break_set(struct task_struct *target,
+                              const struct user_regset *regset,
+                              unsigned int pos, unsigned int count,
+                              const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 #endif
 
 static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
+               .set = s390_last_break_set,
        },
 #endif
        [REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_compat_last_break_set(struct task_struct *target,
+                                     const struct user_regset *regset,
+                                     unsigned int pos, unsigned int count,
+                                     const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
+               .set = s390_compat_last_break_set,
        },
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_S390_SYSTEM_CALL,
index 8ac6bfa2786cbe139d9964b1f4ab374e4302e2e8..e54c4ff8abaaa3d1a34efd34decd0d12c713e4ef 100644 (file)
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 
        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
+       if (OLDMEM_BASE)
+               return;
        if (console_devno != -1)
                sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
 
 
 #ifdef CONFIG_ZFCPDUMP
-       if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+       if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
                memory_end = ZFCPDUMP_HSA_SIZE;
                memory_end_set = 1;
        }
@@ -577,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
                *msg = "first memory chunk must be at least crashkernel size";
                return 0;
        }
-       if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+       if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
                return OLDMEM_BASE;
 
        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
index 05a85bc14c98a2556e86bf40cec7e76de2dc2423..7f6f9f35454518f091e4fb86e3d39e46c8aaf391 100644 (file)
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
                                                     regs->svc_code >> 16);
                                break;
                        }
-                       /* No longer in a system call */
-                       clear_thread_flag(TIF_SYSCALL);
                }
+               /* No longer in a system call */
+               clear_thread_flag(TIF_SYSCALL);
 
                if ((is_compat_task() ?
                     handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* No handlers present - check for system call restart */
+       clear_thread_flag(TIF_SYSCALL);
        if (current_thread_info()->system_call) {
                regs->svc_code = current_thread_info()->system_call;
                switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
                        regs->gprs[2] = regs->orig_gpr2;
                        set_thread_flag(TIF_SYSCALL);
                        break;
-               default:
-                       clear_thread_flag(TIF_SYSCALL);
-                       break;
                }
        }
 
index 73eb08c874fb450ef6ba0464d4aa4ca845bb6911..bcab2f04ba581f7648426a1790485d53626d8fda 100644 (file)
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
 SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
 SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
 SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
index 77b8942b9a153af398cf9cf0e2dfd8eb7c5629ef..fdb5b8cb260f683e1f4559f95c006a7838690f60 100644 (file)
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
        return mask;
 }
 
-static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
-                            struct mask_info *book, struct mask_info *core)
+static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+                                         struct mask_info *book,
+                                         struct mask_info *core,
+                                         int z10)
 {
        unsigned int cpu;
 
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
                        cpu_book_id[lcpu] = book->id;
 #endif
                        cpumask_set_cpu(lcpu, &core->mask);
-                       cpu_core_id[lcpu] = core->id;
+                       if (z10) {
+                               cpu_core_id[lcpu] = rcpu;
+                               core = core->next;
+                       } else {
+                               cpu_core_id[lcpu] = core->id;
+                       }
                        smp_cpu_polarization[lcpu] = tl_cpu->pp;
                }
        }
+       return core;
 }
 
 static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 {
 #ifdef CONFIG_SCHED_BOOK
        struct mask_info *book = &book_info;
+       struct cpuid cpu_id;
 #else
        struct mask_info *book = NULL;
 #endif
        struct mask_info *core = &core_info;
        union topology_entry *tle, *end;
+       int z10 = 0;
 
-
+#ifdef CONFIG_SCHED_BOOK
+       get_cpu_id(&cpu_id);
+       z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
+#endif
        spin_lock_irq(&topology_lock);
        clear_masks();
        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
+#ifdef CONFIG_SCHED_BOOK
+               if (z10) {
+                       switch (tle->nl) {
+                       case 1:
+                               book = book->next;
+                               book->id = tle->container.id;
+                               break;
+                       case 0:
+                               core = add_cpus_to_mask(&tle->cpu, book, core, z10);
+                               break;
+                       default:
+                               clear_masks();
+                               goto out;
+                       }
+                       tle = next_tle(tle);
+                       continue;
+               }
+#endif
                switch (tle->nl) {
 #ifdef CONFIG_SCHED_BOOK
                case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
                        core->id = tle->container.id;
                        break;
                case 0:
-                       add_cpus_to_mask(&tle->cpu, book, core);
+                       add_cpus_to_mask(&tle->cpu, book, core, z10);
                        break;
                default:
                        clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
        for (i = 0; i < TOPOLOGY_NR_MAG; i++)
                printk(" %d", info->mag[i]);
        printk(" / %d\n", info->mnest);
-       alloc_masks(info, &core_info, 2);
+       alloc_masks(info, &core_info, 1);
 #ifdef CONFIG_SCHED_BOOK
-       alloc_masks(info, &book_info, 3);
+       alloc_masks(info, &book_info, 2);
 #endif
 }
index 56fe6bc81fee45804a61c37fb189ccf73a0b72e8..e4c79ebb40e628850fd230fc94a0c7fdc77aaeb1 100644 (file)
@@ -43,6 +43,8 @@ SECTIONS
 
        NOTES :text :note
 
+       .dummy : { *(.dummy) } :data
+
        RODATA
 
 #ifdef CONFIG_SHARED_KERNEL
index 87cedd61be0467fdd4085308e55967acb9210ef3..8943e82cd4d94248ef8708839d6c7e0d99e324f9 100644 (file)
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP;
        }
 
-       atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+       atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
index c7c51898984ef78e64b761ad8c6f9256fa71f30b..02434543eabbede6e5e2fbc17f8e649f5a19871a 100644 (file)
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
        int rc = 0;
 
        vcpu->stat.exit_stop_request++;
-       atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
        }
 
        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+               atomic_set_mask(CPUSTAT_STOPPED,
+                               &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
index 87c16705b381396f796e7fbacbb079d62cab8d9d..278ee009ce6570a9d4049a095def51b17fb164d5 100644 (file)
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
+               atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
                break;
 
        case KVM_S390_PROGRAM_INT:
index 0bd3bea1e4cdfc40069a474d37f74459903803ce..d1c445732451b6d1bd1c4b95db3d6467ff629c68 100644 (file)
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+       { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
+       case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
+       atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-       atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
+       atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+                                                   CPUSTAT_SM |
+                                                   CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
        int rc = 0;
 
-       if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+       if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-       atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+       atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
index 391626361084e363cfd2f0f4251dd652ebc16a58..d02638959922aba41712d16d6dfb20f86f87ec8b 100644 (file)
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
        u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
        u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
        struct vm_area_struct *vma;
+       unsigned long user_address;
 
        vcpu->stat.instruction_tprot++;
 
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP;
 
 
+       /* we must resolve the address without holding the mmap semaphore.
+        * This is ok since the userspace hypervisor is not supposed to change
+        * the mapping while the guest queries the memory. Otherwise the guest
+        * might crash or get wrong info anyway. */
+       user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
+
        down_read(&current->mm->mmap_sem);
-       vma = find_vma(current->mm,
-                       (unsigned long) __guestaddr_to_user(vcpu, address1));
+       vma = find_vma(current->mm, user_address);
        if (!vma) {
                up_read(&current->mm->mmap_sem);
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
index f815118835f3c3221932c5e01c86847cc1ec9ba8..0a7941d74bc68b30b9121090aeb96111c86058b7 100644 (file)
 #define SIGP_SET_PREFIX        0x0d
 #define SIGP_STORE_STATUS_ADDR 0x0e
 #define SIGP_SET_ARCH          0x12
+#define SIGP_SENSE_RUNNING     0x15
 
 /* cpu status bits */
 #define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_NOT_RUNNING      0x00000400UL
 #define SIGP_STAT_INCORRECT_STATE   0x00000200UL
 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
 #define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
-       else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                & CPUSTAT_RUNNING) {
+       else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+                 & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                rc = 1; /* status stored */
        } else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
-       if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+       if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                rc = 1; /* incorrect state */
                *reg &= SIGP_STAT_INCORRECT_STATE;
                kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
        return rc;
 }
 
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                               unsigned long *reg)
+{
+       int rc;
+       struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+       if (cpu_addr >= KVM_MAX_VCPUS)
+               return 3; /* not operational */
+
+       spin_lock(&fi->lock);
+       if (fi->local_int[cpu_addr] == NULL)
+               rc = 3; /* not operational */
+       else {
+               if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+                   & CPUSTAT_RUNNING) {
+                       /* running */
+                       rc = 1;
+               } else {
+                       /* not running */
+                       *reg &= 0xffffffff00000000UL;
+                       *reg |= SIGP_STAT_NOT_RUNNING;
+                       rc = 0;
+               }
+       }
+       spin_unlock(&fi->lock);
+
+       VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+                  rc);
+
+       return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->arch.guest_gprs[r1]);
                break;
+       case SIGP_SENSE_RUNNING:
+               vcpu->stat.instruction_sigp_sense_running++;
+               rc = __sigp_sense_running(vcpu, cpu_addr,
+                                         &vcpu->arch.guest_gprs[r1]);
+               break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                /* user space must know about restart */
index 1766def5bc3fc7f4717904313c69b2a7879b6d86..a9a301866b3c1846f37792aa371620e2b3004415 100644 (file)
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
-                        * interrupt doesn't put the task to sleep. */
-                       tsk->thread.pfault_wait = -1;
+                        * interrupt doesn't put the task to sleep.
+                        * If the task is not running, ignore the completion
+                        * interrupt since it must be a leftover of a PFAULT
+                        * CANCEL operation which didn't remove all pending
+                        * completion interrupts. */
+                       if (tsk->state == TASK_RUNNING)
+                               tsk->thread.pfault_wait = -1;
                }
                put_task_struct(tsk);
        } else {
index 6efc18b5e60af4e3ce202edb5e381fe2ab45c03e..bd58b72454cf52c62b76bc049547931c74e020e6 100644 (file)
@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (oprofile_started)
index ec8c84c14b17219cd22c08a4d7e1dfdd9c3da0ec..895e337c79b60ed7477f754f968ca80a9a52d754 100644 (file)
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
 #define GBECONT                0xffc10100
 #define GBECONT_RMII1  BIT(17)
 #define GBECONT_RMII0  BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800)
+       if (((unsigned long)addr & 0x00000fff) < 0x0800)
                writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
        else
                writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
        },
 };
 
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800) {
+       if (((unsigned long)addr & 0x00000fff) < 0x0800) {
                gpio_set_value(GPIO_PTT4, 1);
                writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
        } else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
 };
 
 static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
-       .chan_priv_tx   = SHDMA_SLAVE_MMCIF_TX,
-       .chan_priv_rx   = SHDMA_SLAVE_MMCIF_RX,
+       .chan_priv_tx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_TX,
+       },
+       .chan_priv_rx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_RX,
+       }
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
index b4c2d2b946ddc084bfe48b2d58128d47e2f724aa..e4dd5d5a111506889b5a69284355721fc23c80f7 100644 (file)
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
        kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        ops->backtrace = sh_backtrace;
        return -ENODEV;
 }
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
index 5b31a8e89823699fbe5f99ebf6eb8d34bac8cb46..a790cc657476320831f9753ad0fd2aff9264ca0a 100644 (file)
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
 #define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma,
-                             unsigned long from, unsigned long pfn,
-                             unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                          unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+                                    unsigned long from, unsigned long pfn,
+                                    unsigned long size, pgprot_t prot)
+{
+       unsigned long long offset, space, phys_base;
+
+       offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
+       space = GET_IOSPACE(pfn);
+       phys_base = offset | (space << 32ULL);
+
+       return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
 ({                                                                       \
index adf89329af59a653f8694f05df291f2c91475506..38ebb2c601374a386192c061b50ea171c4f83373 100644 (file)
@@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
 
 extern int page_in_phys_avail(unsigned long paddr);
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                              unsigned long pfn,
-                              unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffffffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                          unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+                                    unsigned long from, unsigned long pfn,
+                                    unsigned long size, pgprot_t prot)
+{
+       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+       int space = GET_IOSPACE(pfn);
+       unsigned long phys_base;
+
+       phys_base = offset | (((unsigned long) space) << 32UL);
+
+       return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
index 7429b47c3acad8adb97ff177c4459fafcb1ae1e3..381edcd5bc2946471e2c47e4f73a607272d25c09 100644 (file)
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
 
        dp->rcv_buf_len = 4096;
 
-       dp->ds_states = kzalloc(sizeof(ds_states_template),
-                               GFP_KERNEL);
+       dp->ds_states = kmemdup(ds_states_template,
+                               sizeof(ds_states_template), GFP_KERNEL);
        if (!dp->ds_states)
                goto out_free_rcv_buf;
 
-       memcpy(dp->ds_states, ds_states_template,
-              sizeof(ds_states_template));
        dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
        for (i = 0; i < dp->num_ds_states; i++)
index e27f8ea8656e3e4b9b1c799a6d170cc9739e2cb2..0c218e4c0881fba70c1748e6a4c295e0db280652 100644 (file)
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
+
+#include <asm/trap_block.h>
+
 struct popc_3insn_patch_entry {
        unsigned int    addr;
        unsigned int    insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
        __popc_6insn_patch_end;
 
 extern void __init per_cpu_patch(void);
+extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+                                   struct sun4v_1insn_patch_entry *);
+extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                                   struct sun4v_2insn_patch_entry *);
 extern void __init sun4v_patch(void);
 extern void __init boot_cpu_id_too_large(int cpu);
 extern unsigned int dcache_parity_tl1_occurred;
index da0c6c70ccb2c0a783043c151bd82d3dc289d86a..e5519870c3d9ab43b516c5994568af974664d616 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/processor.h>
 #include <asm/spitfire.h>
 
+#include "entry.h"
+
 #ifdef CONFIG_SPARC64
 
 #include <linux/jump_label.h>
@@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_SPARC64
+static void do_patch_sections(const Elf_Ehdr *hdr,
+                             const Elf_Shdr *sechdrs)
+{
+       const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
+       char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+               if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
+                       sun4v_1insn = s;
+               if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
+                       sun4v_2insn = s;
+       }
+
+       if (sun4v_1insn && tlb_type == hypervisor) {
+               void *p = (void *) sun4v_1insn->sh_addr;
+               sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
+       }
+       if (sun4v_2insn && tlb_type == hypervisor) {
+               void *p = (void *) sun4v_2insn->sh_addr;
+               sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
+       }
+}
+
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
@@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        /* make jump label nops */
        jump_label_apply_nops(me);
 
+       do_patch_sections(hdr, sechdrs);
+
        /* Cheetah's I-cache is fully coherent.  */
        if (tlb_type == spitfire) {
                unsigned long va;
index b272cda35a0125904bc6f17eb1c5d14054dec24e..af5755d20fbe91eb43b682e99aa8f836bfafa6ad 100644 (file)
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index 46614807a57f45150f89dbb1cb5ee115beaad28c..741df916c124b10b12751da38bf7d3dac335c5dd 100644 (file)
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        void *new_val;
        int err;
 
-       new_val = kmalloc(len, GFP_KERNEL);
+       new_val = kmemdup(val, len, GFP_KERNEL);
        if (!new_val)
                return -ENOMEM;
 
-       memcpy(new_val, val, len);
-
        err = -ENODEV;
 
        mutex_lock(&of_set_property_mutex);
index c965595aa7e968c8a28e0cfce8f8cd9158db3e0f..a854a1c240ffe1eff016a3324ecfccd1dd63ce4c 100644 (file)
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
        }
 }
 
-void __init sun4v_patch(void)
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
+                            struct sun4v_1insn_patch_entry *end)
 {
-       extern void sun4v_hvapi_init(void);
-       struct sun4v_1insn_patch_entry *p1;
-       struct sun4v_2insn_patch_entry *p2;
-
-       if (tlb_type != hypervisor)
-               return;
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-       p1 = &__sun4v_1insn_patch;
-       while (p1 < &__sun4v_1insn_patch_end) {
-               unsigned long addr = p1->addr;
-
-               *(unsigned int *) (addr +  0) = p1->insn;
+               *(unsigned int *) (addr +  0) = start->insn;
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
 
-               p1++;
+               start++;
        }
+}
 
-       p2 = &__sun4v_2insn_patch;
-       while (p2 < &__sun4v_2insn_patch_end) {
-               unsigned long addr = p2->addr;
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                            struct sun4v_2insn_patch_entry *end)
+{
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-               *(unsigned int *) (addr +  0) = p2->insns[0];
+               *(unsigned int *) (addr +  0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
 
-               *(unsigned int *) (addr +  4) = p2->insns[1];
+               *(unsigned int *) (addr +  4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  4));
 
-               p2++;
+               start++;
        }
+}
+
+void __init sun4v_patch(void)
+{
+       extern void sun4v_hvapi_init(void);
+
+       if (tlb_type != hypervisor)
+               return;
+
+       sun4v_patch_1insn_range(&__sun4v_1insn_patch,
+                               &__sun4v_1insn_patch_end);
+
+       sun4v_patch_2insn_range(&__sun4v_2insn_patch,
+                               &__sun4v_2insn_patch_end);
 
        sun4v_hvapi_init();
 }
index 2caa556db86dc44818521233c5ba7aad226ca186..023b8860dc9704330391b1db6a8eaf2ea117d32e 100644 (file)
@@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
-                int restart_syscall, unsigned long orig_i0)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs)
 {
        struct k_sigaction ka;
+       unsigned long orig_i0;
+       int restart_syscall;
        siginfo_t info;
        int signr;
        
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-       /* If the debugger messes with the program counter, it clears
-        * the "in syscall" bit, directing us to not perform a syscall
-        * restart.
-        */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       orig_i0 = 0;
+       if (pt_regs_is_syscall(regs) &&
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
 
        if (signr > 0) {
                if (restart_syscall)
index 8ce247ac04cc0d905abd7d7e1d5de20073ce654a..d54c6e53aba00323fda9f01c95aae0f001352fb9 100644 (file)
@@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        siginfo_t info;
        int signr;
 
+       /* It's a lot of work and synchronization to add a new ptrace
+        * register for GDB to save and restore in order to get
+        * orig_i0 correct for syscall restarts when debugging.
+        *
+        * Although it should be the case that most of the global
+        * registers are volatile across a system call, glibc already
+        * depends upon the fact that we preserve them.  So we can't
+        * just use any global register to save away the orig_i0 value.
+        *
+        * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+        * preserved across a system call trap by various pieces of
+        * code in glibc.
+        *
+        * %g7 is used as the "thread register".   %g6 is not used in
+        * any fixed manner.  %g6 is used as a scratch register and
+        * a compiler temporary, but its value is never used across
+        * a system call.  Therefore %g6 is usable for orig_i0 storage.
+        */
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
-               restart_syscall = 1;
-       else
-               restart_syscall = 0;
+               regs->u_regs[UREG_G6] = orig_i0;
 
        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
@@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
         * the software "in syscall" bit, directing us to not perform
         * a syscall restart.
         */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
+
 
        if (signr > 0) {
                if (restart_syscall)
index a2b81598d90562695abefa70b8c66ea11832341c..f0836cd0e2f243ffb3c1df37ff02cc31488850b7 100644 (file)
@@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        siginfo_t info;
        int signr;
        
+       /* It's a lot of work and synchronization to add a new ptrace
+        * register for GDB to save and restore in order to get
+        * orig_i0 correct for syscall restarts when debugging.
+        *
+        * Although it should be the case that most of the global
+        * registers are volatile across a system call, glibc already
+        * depends upon the fact that we preserve them.  So we can't
+        * just use any global register to save away the orig_i0 value.
+        *
+        * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+        * preserved across a system call trap by various pieces of
+        * code in glibc.
+        *
+        * %g7 is used as the "thread register".   %g6 is not used in
+        * any fixed manner.  %g6 is used as a scratch register and
+        * a compiler temporary, but its value is never used across
+        * a system call.  Therefore %g6 is usable for orig_i0 storage.
+        */
        if (pt_regs_is_syscall(regs) &&
-           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
-               restart_syscall = 1;
-       } else
-               restart_syscall = 0;
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
+               regs->u_regs[UREG_G6] = orig_i0;
 
        if (current_thread_info()->status & TS_RESTORE_SIGMASK)
                oldset = &current->saved_sigmask;
@@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
-               extern void do_signal32(sigset_t *, struct pt_regs *,
-                                       int restart_syscall,
-                                       unsigned long orig_i0);
-               do_signal32(oldset, regs, restart_syscall, orig_i0);
+               extern void do_signal32(sigset_t *, struct pt_regs *);
+               do_signal32(oldset, regs);
                return;
        }
 #endif 
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-       /* If the debugger messes with the program counter, it clears
-        * the software "in syscall" bit, directing us to not perform
-        * a syscall restart.
-        */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       if (pt_regs_is_syscall(regs) &&
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
 
        if (signr > 0) {
                if (restart_syscall)
index e7dc508c38eb47dd1f77bee9a125687287c2856a..b19570d41a39eab11e7b60d2146aec678d0c8621 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/types.h>
 #include <linux/thread_info.h>
 #include <linux/uaccess.h>
+#include <linux/errno.h>
 
 #include <asm/sigcontext.h>
 #include <asm/fpumacro.h>
index e3cda21b5ee994200b7d09e7e14c33704363e67c..301421c11291c6a0c8ef01a1af18da17453c9d35 100644 (file)
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += loadmmu.o
-obj-y                   += generic_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
index 5175ac2f4820c603d55e5fba0c06b735080edf96..8a7f81743c126b1b8648584e20504dce6111dd48 100644 (file)
@@ -302,8 +302,7 @@ void __init btfixup(void)
                                case 'i':       /* INT */
                                        if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-                                       else if ((insn & 0x80002000) == 0x80002000 &&
-                                                (insn & 0x01800000) != 0x01800000) /* %LO */
+                                       else if ((insn & 0x80002000) == 0x80002000) /* %LO */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
                                        else {
                                                prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644 (file)
index 6ca39a6..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/export.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
-               address += PAGE_SIZE;
-               offset += PAGE_SIZE;
-               pte++;
-       } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       offset -= address;
-       do {
-               pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-       return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                      unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int space = GET_IOSPACE(pfn);
-       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-
-       /* See comment in mm/memory.c remap_pfn_range */
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-       vma->vm_pgoff = (offset >> PAGE_SHIFT) |
-               ((unsigned long)space << 28UL);
-
-       offset -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(vma, beg, end);
-
-       while (from < end) {
-               pmd_t *pmd = pmd_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       }
-
-       flush_tlb_range(vma, beg, end);
-       return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644 (file)
index 9b357dd..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
-                                     unsigned long address,
-                                     unsigned long size,
-                                     unsigned long offset, pgprot_t prot,
-                                     int space)
-{
-       unsigned long end;
-
-       /* clear hack bit that was used as a write_combine side-effect flag */
-       offset &= ~0x1UL;
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               pte_t entry;
-               unsigned long curend = address + PAGE_SIZE;
-               
-               entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
-               if (!(address & 0xffff)) {
-                       if (PAGE_SIZE < (4 * 1024 * 1024) &&
-                           !(address & 0x3fffff) &&
-                           !(offset & 0x3ffffe) &&
-                           end >= address + 0x400000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 4 * 1024 * 1024);
-                               curend = address + 0x400000;
-                               offset += 0x400000;
-                       } else if (PAGE_SIZE < (512 * 1024) &&
-                                  !(address & 0x7ffff) &&
-                                  !(offset & 0x7fffe) &&
-                                  end >= address + 0x80000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 512 * 1024 * 1024);
-                               curend = address + 0x80000;
-                               offset += 0x80000;
-                       } else if (PAGE_SIZE < (64 * 1024) &&
-                                  !(offset & 0xfffe) &&
-                                  end >= address + 0x10000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 64 * 1024);
-                               curend = address + 0x10000;
-                               offset += 0x10000;
-                       } else
-                               offset += PAGE_SIZE;
-               } else
-                       offset += PAGE_SIZE;
-
-               if (pte_write(entry))
-                       entry = pte_mkdirty(entry);
-               do {
-                       BUG_ON(!pte_none(*pte));
-                       set_pte_at(mm, address, pte, entry);
-                       address += PAGE_SIZE;
-                       pte_val(entry) += PAGE_SIZE;
-                       pte++;
-               } while (address < curend);
-       } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       offset -= address;
-       do {
-               pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-       return 0;
-}
-
-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PUD_MASK;
-       end = address + size;
-       if (end > PUD_SIZE)
-               end = PUD_SIZE;
-       offset -= address;
-       do {
-               pmd_t *pmd = pmd_alloc(mm, pud, address);
-               if (!pud)
-                       return -ENOMEM;
-               io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
-               address = (address + PUD_SIZE) & PUD_MASK;
-               pud++;
-       } while (address < end);
-       return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-               unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int space = GET_IOSPACE(pfn);
-       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-       unsigned long phys_base;
-
-       phys_base = offset | (((unsigned long) space) << 32UL);
-
-       /* See comment in mm/memory.c remap_pfn_range */
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-       vma->vm_pgoff = phys_base >> PAGE_SHIFT;
-
-       offset -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(vma, beg, end);
-
-       while (from < end) {
-               pud_t *pud = pud_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pud)
-                       break;
-               error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       }
-
-       flush_tlb_range(vma, beg, end);
-       return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
index 94e9a511de849c925bcf28841368ce278feade0c..f80f8ceabc67abd6ef57fce76cb1a224f53204c8 100644 (file)
@@ -74,16 +74,6 @@ enum {
  */
 void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core.  We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
 void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
index aa0134db2dd683e4f2bde38823bcabc931f7d8ef..02e62806501256ad63f322d26461bd34f8bcc4e9 100644 (file)
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
  * Remove an irq from the disabled mask.  If we're in an interrupt
  * context, defer enabling the HW interrupt until we leave.
  */
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+       get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
-               unmask_irqs(1UL << irq);
+               unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(enable_percpu_irq);
 
 /*
  * Add an irq to the disabled mask.  We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
  * in an interrupt context, the return path is careful to avoid
  * unmasking a newly disabled interrupt.
  */
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) |= (1UL << irq);
-       mask_irqs(1UL << irq);
+       get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+       mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
 static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
 
 static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
+       .irq_enable = tile_irq_chip_enable,
+       .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
index 658f2ce426a44ef5fefab5852cfbe05dc796953b..b3ed19f8779c4a9058ea818bc8fb31a6a75ef34b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 2a8014cb1ff52f0ef3e24a58a5e20ddcfeb3c8cc..9d610d3fb11e9ac93c6e17875cff26257fa89e46 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index b671a86f45152155bf0170504bdf2511112cdfb0..602908268093cf53397df895e68e240cc601d552 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/stat.h>
 #include <hv/hypervisor.h>
 
 /* Return a string queried from the hypervisor, truncated to page size. */
index a87d2a859ba97de91db6e8769cb8ecb1bcd78e49..2a81d32de0da518989e5a5118ab91b77774e3e7f 100644 (file)
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
index cbe6f4f9eca3c93c43039be31238fdeb56ee36dc..1cc6ae477c98b59711c29deb5674e50c63c96be3 100644 (file)
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
-               int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
-               while (pages--)
-                       __free_page(page++);
+               if (order == 0) {
+                       free_hot_cold_page(page, 0);
+               } else {
+                       init_page_count(page);
+                       __free_pages(page, order);
+               }
        }
 }
index e57dcce9bfda4da6ea6be362a542bf891289f0c0..942ed6174f1d073e565d52e5dc074c7e84d3b533 100644 (file)
@@ -237,13 +237,13 @@ menu "PKUnity NetBook-0916 Features"
 
 config I2C_BATTERY_BQ27200
        tristate "I2C Battery BQ27200 Support"
-       select PUV3_I2C
+       select I2C_PUV3
        select POWER_SUPPLY
        select BATTERY_BQ27x00
 
 config I2C_EEPROM_AT24
        tristate "I2C EEPROMs AT24 support"
-       select PUV3_I2C
+       select I2C_PUV3
        select MISC_DEVICES
        select EEPROM_AT24
 
index ae2ec334c3c61473ffa6b7b5cf8695c8d64c322b..1a36262398435ff9f9f224cab74da1a3d2bb0233 100644 (file)
@@ -44,18 +44,4 @@ config DEBUG_OCD
          Say Y here if you want the debug print routines to direct their
          output to the UniCore On-Chip-Debugger channel using CP #1.
 
-config DEBUG_OCD_BREAKPOINT
-       bool "Breakpoint support via On-Chip-Debugger"
-       depends on DEBUG_OCD
-
-config DEBUG_UART
-       int "Kernel low-level debugging messages via serial port"
-       depends on DEBUG_LL
-       range 0 1
-       default "0"
-       help
-         Choice for UART for kernel low-level using PKUnity UARTS,
-         should be between zero and one. The port must have been
-         initialised by the boot-loader before use.
-
 endmenu
index b0954a2d23cfaf5b4db0bbe1b6e95493d75ebc03..950a9afa38f8632239df166ac21ffe094229217e 100644 (file)
@@ -10,8 +10,8 @@
 # Copyright (C) 2001~2010 GUAN Xue-tao
 #
 
-EXTRA_CFLAGS   := -fpic -fno-builtin
-EXTRA_AFLAGS   := -Wa,-march=all
+ccflags-y      := -fpic -fno-builtin
+asflags-y      := -Wa,-march=all
 
 OBJS           := misc.o
 
index 1628a63289946218c12cebed99246c87087e8a18..401f597bc38cfcdf102ff6188f8bd3c27f46dde5 100644 (file)
 #ifndef __UNICORE_BITOPS_H__
 #define __UNICORE_BITOPS_H__
 
-#define find_next_bit          __uc32_find_next_bit
-#define find_next_zero_bit     __uc32_find_next_zero_bit
-
-#define find_first_bit         __uc32_find_first_bit
-#define find_first_zero_bit    __uc32_find_first_zero_bit
-
 #define _ASM_GENERIC_BITOPS_FLS_H_
 #define _ASM_GENERIC_BITOPS___FLS_H_
 #define _ASM_GENERIC_BITOPS_FFS_H_
@@ -44,4 +38,10 @@ static inline int fls(int x)
 
 #include <asm-generic/bitops.h>
 
+/* following definitions: to avoid using codes in lib/find_*.c */
+#define find_next_bit          find_next_bit
+#define find_next_zero_bit     find_next_zero_bit
+#define find_first_bit         find_first_bit
+#define find_first_zero_bit    find_first_zero_bit
+
 #endif /* __UNICORE_BITOPS_H__ */
index e11cb07865782c81b82e4dd90d1977b8f4495245..f0d780a51f9b5dec74a5310fe0f1fd2c4fdc7b28 100644 (file)
@@ -53,7 +53,6 @@ struct thread_struct {
 #define start_thread(regs, pc, sp)                                     \
 ({                                                                     \
        unsigned long *stack = (unsigned long *)sp;                     \
-       set_fs(USER_DS);                                                \
        memset(regs->uregs, 0, sizeof(regs->uregs));                    \
        regs->UCreg_asr = USER_MODE;                                    \
        regs->UCreg_pc = pc & ~1;       /* pc */                        \
index a8970809428a65985873e42ffd417782c0f47ae9..d98bd812cae1ea17efcb314a1cccee9639636b37 100644 (file)
@@ -24,8 +24,8 @@
 
 #include "ksyms.h"
 
-EXPORT_SYMBOL(__uc32_find_next_zero_bit);
-EXPORT_SYMBOL(__uc32_find_next_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
+EXPORT_SYMBOL(find_next_bit);
 
 EXPORT_SYMBOL(__backtrace);
 
index c360ce905d8b7bcb0d9b84b796089987c5cf967b..c77746247d3698856f26e27531c7454382178a6e 100644 (file)
@@ -17,7 +17,7 @@
  * Purpose  : Find a 'zero' bit
  * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
  */
-__uc32_find_first_zero_bit:
+ENTRY(find_first_zero_bit)
                cxor.a  r1, #0
                beq     3f
                mov     r2, #0
@@ -29,13 +29,14 @@ __uc32_find_first_zero_bit:
                bub     1b
 3:             mov     r0, r1                  @ no free bits
                mov     pc, lr
+ENDPROC(find_first_zero_bit)
 
 /*
  * Purpose  : Find next 'zero' bit
  * Prototype: int find_next_zero_bit
  *             (void *addr, unsigned int maxbit, int offset)
  */
-ENTRY(__uc32_find_next_zero_bit)
+ENTRY(find_next_zero_bit)
                cxor.a  r1, #0
                beq     3b
                and.a   ip, r2, #7
@@ -47,14 +48,14 @@ ENTRY(__uc32_find_next_zero_bit)
                or      r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
-ENDPROC(__uc32_find_next_zero_bit)
+ENDPROC(find_next_zero_bit)
 
 /*
  * Purpose  : Find a 'one' bit
  * Prototype: int find_first_bit
  *             (const unsigned long *addr, unsigned int maxbit);
  */
-__uc32_find_first_bit:
+ENTRY(find_first_bit)
                cxor.a  r1, #0
                beq     3f
                mov     r2, #0
@@ -66,13 +67,14 @@ __uc32_find_first_bit:
                bub     1b
 3:             mov     r0, r1                  @ no free bits
                mov     pc, lr
+ENDPROC(find_first_bit)
 
 /*
  * Purpose  : Find next 'one' bit
  * Prototype: int find_next_zero_bit
  *             (void *addr, unsigned int maxbit, int offset)
  */
-ENTRY(__uc32_find_next_bit)
+ENTRY(find_next_bit)
                cxor.a  r1, #0
                beq     3b
                and.a   ip, r2, #7
@@ -83,7 +85,7 @@ ENTRY(__uc32_find_next_bit)
                or      r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
-ENDPROC(__uc32_find_next_bit)
+ENDPROC(find_next_bit)
 
 /*
  * One or more bits in the LSB of r3 are assumed to be set.
index cb9a1044a771be75563305f8909097a67cd21778..efb42949cc09349e37246baa07f018648186386f 100644 (file)
@@ -390,7 +390,7 @@ config X86_INTEL_CE
          This option compiles in support for the CE4100 SOC for settop
          boxes and media devices.
 
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
        bool "Intel MID platform support"
        depends on X86_32
        depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
          systems which do not have the PCI legacy interfaces (Moorestown,
          Medfield). If you are building for a PC class system say N here.
 
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+       bool
 
 config X86_MRST
        bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
        select SPI
        select INTEL_SCU_IPC
        select X86_PLATFORM_DEVICES
+       select X86_INTEL_MID
        ---help---
          Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
          Internet Device(MID) platform. Moorestown consists of two chips:
index 9b7273cb21937e8a884a08b45b3024af1bf3cfbc..1a6c09af048fbd587613c750502677d7cbb1c052 100644 (file)
@@ -49,6 +49,7 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+extern unsigned int lapic_timer_frequency;
 
 #ifdef CONFIG_SMP
 extern void __inquire_remote_apic(int apicid);
index 4420993acc4734c962922d58ee63e0b700cf330e..925b605eb5c601fa9a6f6c24cf41e596b92d86f8 100644 (file)
@@ -3,11 +3,15 @@
 
 #include <linux/notifier.h>
 
-#define IPCMSG_VRTC    0xFA     /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_WARM_RESET      0xF0
+#define IPCMSG_COLD_RESET      0xF1
+#define IPCMSG_SOFT_RESET      0xF2
+#define IPCMSG_COLD_BOOT       0xF3
+
+#define IPCMSG_VRTC            0xFA     /* Set vRTC device */
+       /* Command id associated with message IPCMSG_VRTC */
+       #define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+       #define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
 
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
index 72a8b52e7dfd0de3fa4271284fdd4a76da7fb31b..a01e7ec7d2377aaede271b6ad4ddd546ef5238a5 100644 (file)
@@ -17,7 +17,7 @@
 #define NMI_REASON_CLEAR_IOCHK 0x08
 #define NMI_REASON_CLEAR_MASK  0x0f
 
-static inline unsigned char get_nmi_reason(void)
+static inline unsigned char default_get_nmi_reason(void)
 {
        return inb(NMI_REASON_PORT);
 }
index c9321f34e55b3fa2cdb9fd4afeaa332dac345add..0e8ae57d3656c4576498b545826a5880214fe9e5 100644 (file)
@@ -201,7 +201,10 @@ int mce_notify_irq(void);
 void mce_notify_process(void);
 
 DECLARE_PER_CPU(struct mce, injectm);
-extern struct file_operations mce_chrdev_ops;
+
+extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
+                                   const char __user *ubuf,
+                                   size_t usize, loff_t *off));
 
 /*
  * Exception handler
index 719f00b28ff5358caf87d736ed5b4100dafdce9e..93f79094c2243211eede22db91acdf33098a059a 100644 (file)
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
 static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
        return __mrst_cpu_chip;
 }
 
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu()    (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
 enum mrst_timer_options {
        MRST_TIMER_DEFAULT,
        MRST_TIMER_APBT_ONLY,
@@ -44,6 +53,13 @@ enum mrst_timer_options {
 
 extern enum mrst_timer_options mrst_timer_options;
 
+/*
+ * Penwell uses spread spectrum clock, so the freq number is not exactly
+ * the same as reported by MSR based on SDM.
+ */
+#define PENWELL_FSB_FREQ_83SKU         83200
+#define PENWELL_FSB_FREQ_100SKU        99840
+
 #define SFI_MTMR_MAX_NUM 8
 #define SFI_MRTC_MAX   8
 
index 084ef95274cd78ceb51b1ea7a208a7a5e486199a..95203d40ffdde69d014c986453905280b49ee9e5 100644 (file)
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
        return native_write_msr_safe(msr, low, high);
 }
 
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
 #define rdmsr_safe(msr, p1, p2)                                        \
 ({                                                             \
        int __err;                                              \
index c2ff2a1d845e402249e44a70e41459805c3faaa8..2d2f01ce6dcbf1a9c8b72ebef77a159e1b3a5b1a 100644 (file)
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
+bool set_pm_idle_to_default(void);
 
 void stop_this_cpu(void *dummy);
 
index fa7b9176b76cb33820034403fd8f4a50dc49709c..431793e5d4846f23bf5947f933fdaf9f5ae1987a 100644 (file)
@@ -32,6 +32,22 @@ extern int no_timer_check;
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC.  Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ *                     - sqazi@google.com
  */
 
 DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
+       unsigned long long quot;
+       unsigned long long rem;
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-       ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+       quot = (cyc >> CYC2NS_SCALE_FACTOR);
+       rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+       ns += quot * per_cpu(cyc2ns, cpu) +
+               ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
        return ns;
 }
 
index 10474fb1185df7e30f52101cea4e358a18e39458..cf1d73643f60723dc514b52e66969ed46a9c01a4 100644 (file)
@@ -57,6 +57,7 @@
 
 #define UV1_HUB_PART_NUMBER    0x88a5
 #define UV2_HUB_PART_NUMBER    0x8eb8
+#define UV2_HUB_PART_NUMBER_X  0x1111
 
 /* Compat: if this #define is present, UV headers support UV2 */
 #define UV2_HUB_IS_SUPPORTED   1
index d3d859035af9e1968d0ad9d4a39000331c27c8f1..1971e652d24be5fc0674633c1edb3717645306e7 100644 (file)
@@ -152,6 +152,7 @@ struct x86_cpuinit_ops {
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc:             calibrate TSC
+ * @wallclock_init:            init the wallclock device
  * @get_wallclock:             get time from HW clock like RTC etc.
  * @set_wallclock:             set time back to HW clock
  * @is_untracked_pat_range     exclude from PAT logic
@@ -160,11 +161,13 @@ struct x86_cpuinit_ops {
  */
 struct x86_platform_ops {
        unsigned long (*calibrate_tsc)(void);
+       void (*wallclock_init)(void);
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long nowtime);
        void (*iommu_shutdown)(void);
        bool (*is_untracked_pat_range)(u64 start, u64 end);
        void (*nmi_init)(void);
+       unsigned char (*get_nmi_reason)(void);
        int (*i8042_detect)(void);
 };
 
index c63822816249e8c41fa47643d3204bde8a758a42..1f84794f0759327c387d602cddccb3d479188f92 100644 (file)
@@ -738,5 +738,5 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
 
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
-       __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+       __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
 }
index a2fd72e0ab35bbff6703836f82540a0f71588e85..f98d84caf94cfdc43cedda4cb411ebea5213e4b7 100644 (file)
@@ -186,7 +186,7 @@ static struct resource lapic_resource = {
        .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
 };
 
-static unsigned int calibration_result;
+unsigned int lapic_timer_frequency = 0;
 
 static void apic_pm_activate(void);
 
@@ -454,7 +454,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
-               __setup_APIC_LVTT(calibration_result,
+               __setup_APIC_LVTT(lapic_timer_frequency,
                                  mode != CLOCK_EVT_MODE_PERIODIC, 1);
                break;
        case CLOCK_EVT_MODE_UNUSED:
@@ -638,6 +638,25 @@ static int __init calibrate_APIC_clock(void)
        long delta, deltatsc;
        int pm_referenced = 0;
 
+       /**
+        * check if lapic timer has already been calibrated by platform
+        * specific routine, such as tsc calibration code. if so, we just fill
+        * in the clockevent structure and return.
+        */
+
+       if (lapic_timer_frequency) {
+               apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
+                               lapic_timer_frequency);
+               lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
+                                       TICK_NSEC, lapic_clockevent.shift);
+               lapic_clockevent.max_delta_ns =
+                       clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+               lapic_clockevent.min_delta_ns =
+                       clockevent_delta2ns(0xF, &lapic_clockevent);
+               lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+               return 0;
+       }
+
        local_irq_disable();
 
        /* Replace the global interrupt handler */
@@ -679,12 +698,12 @@ static int __init calibrate_APIC_clock(void)
        lapic_clockevent.min_delta_ns =
                clockevent_delta2ns(0xF, &lapic_clockevent);
 
-       calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
+       lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
 
        apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
        apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
        apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
-                   calibration_result);
+                   lapic_timer_frequency);
 
        if (cpu_has_tsc) {
                apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
@@ -695,13 +714,13 @@ static int __init calibrate_APIC_clock(void)
 
        apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
                    "%u.%04u MHz.\n",
-                   calibration_result / (1000000 / HZ),
-                   calibration_result % (1000000 / HZ));
+                   lapic_timer_frequency / (1000000 / HZ),
+                   lapic_timer_frequency % (1000000 / HZ));
 
        /*
         * Do a sanity check on the APIC calibration result
         */
-       if (calibration_result < (1000000 / HZ)) {
+       if (lapic_timer_frequency < (1000000 / HZ)) {
                local_irq_enable();
                pr_warning("APIC frequency too slow, disabling apic timer\n");
                return -1;
index 3c31fa98af6dcb23a9298fae4c0ea29495167d0f..6d939d7847e293901538cfe32ae1b95d1d82551a 100644 (file)
@@ -193,10 +193,8 @@ int __init arch_early_irq_init(void)
        struct irq_cfg *cfg;
        int count, node, i;
 
-       if (!legacy_pic->nr_legacy_irqs) {
-               nr_irqs_gsi = 0;
+       if (!legacy_pic->nr_legacy_irqs)
                io_apic_irqs = ~0UL;
-       }
 
        for (i = 0; i < nr_ioapics; i++) {
                ioapics[i].saved_registers =
@@ -1696,6 +1694,7 @@ __apicdebuginit(void) print_IO_APICs(void)
        int ioapic_idx;
        struct irq_cfg *cfg;
        unsigned int irq;
+       struct irq_chip *chip;
 
        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
@@ -1716,6 +1715,10 @@ __apicdebuginit(void) print_IO_APICs(void)
        for_each_active_irq(irq) {
                struct irq_pin_list *entry;
 
+               chip = irq_get_chip(irq);
+               if (chip != &ioapic_chip)
+                       continue;
+
                cfg = irq_get_chip_data(irq);
                if (!cfg)
                        continue;
index 62ae3001ae02c4348d640dc01f67fd055718deb0..9d59bbacd4e3cb7a76474a59e2965c739140d475 100644 (file)
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
 
        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+       if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+               uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
index c7e46cb353279080f2b1f67b34e283f0086a5e83..0bab2b18bb2099c4290f046bb210035b05083869 100644 (file)
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
-       u32 dummy;
-
        early_init_amd_mc(c);
 
        /*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
 #endif
-
-       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
+       u32 dummy;
+
 #ifdef CONFIG_SMP
        unsigned long long value;
 
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
+
+       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 #ifdef CONFIG_X86_32
index 6199232161cffa181d6cf80605adb38dfbb51a61..319882ef848d3cd43c0a4dcfa41ea14cd09502ab 100644 (file)
@@ -208,7 +208,7 @@ static int inject_init(void)
        if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
                return -ENOMEM;
        printk(KERN_INFO "Machine check injector initialized\n");
-       mce_chrdev_ops.write = mce_write;
+       register_mce_write_callback(mce_write);
        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
                                "mce_notify");
        return 0;
index 362056aefeb474c86b165c4f9c77d213b40fdbfa..2af127d4c3d1dc2ebdab3e0e2ff95d2d07595a46 100644 (file)
@@ -1634,16 +1634,35 @@ static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
        }
 }
 
-/* Modified in mce-inject.c, so not static or const */
-struct file_operations mce_chrdev_ops = {
+static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
+                           size_t usize, loff_t *off);
+
+void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
+                            const char __user *ubuf,
+                            size_t usize, loff_t *off))
+{
+       mce_write = fn;
+}
+EXPORT_SYMBOL_GPL(register_mce_write_callback);
+
+ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
+                        size_t usize, loff_t *off)
+{
+       if (mce_write)
+               return mce_write(filp, ubuf, usize, off);
+       else
+               return -EINVAL;
+}
+
+static const struct file_operations mce_chrdev_ops = {
        .open                   = mce_chrdev_open,
        .release                = mce_chrdev_release,
        .read                   = mce_chrdev_read,
+       .write                  = mce_chrdev_write,
        .poll                   = mce_chrdev_poll,
        .unlocked_ioctl         = mce_chrdev_ioctl,
        .llseek                 = no_llseek,
 };
-EXPORT_SYMBOL_GPL(mce_chrdev_ops);
 
 static struct miscdevice mce_chrdev_device = {
        MISC_MCELOG_MINOR,
index a71efcdbb0925ffe7c2f97d58d0625e4179662fc..97b26356e9ee8b022b45ae1adc5ef7628a5e2539 100644 (file)
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 
                if (tmp != mask_lo) {
                        printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+                       add_taint(TAINT_FIRMWARE_WORKAROUND);
                        mask_lo = tmp;
                }
        }
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+       wbinvd();
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
index 640891014b2ae3dccdef498db11f05a0942145e5..2bda212a0010ca561e8f34a9d56c2502b47ff70b 100644 (file)
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
                        return -EOPNOTSUPP;
        }
 
-       /*
-        * Do not allow config1 (extended registers) to propagate,
-        * there's no sane user-space generalization yet:
-        */
        if (attr->type == PERF_TYPE_RAW)
-               return 0;
+               return x86_pmu_extra_regs(event->attr.config, event);
 
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 /*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
        if (is_x86_event(leader)) {
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
                        continue;
 
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
 
                cpuc->event_list[n] = event;
                n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
        c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
        if (!c || !c->weight)
-               ret = -ENOSPC;
+               ret = -EINVAL;
 
        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
 {
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
-       int ret = -ENOSPC, n;
+       int ret = -EINVAL, n;
 
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
index ab6343d21825d7d9328fae81fd6faef3bf092a13..3b8a2d30d14e8ebeb2c58406212fbbd3c79ba935 100644 (file)
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
                goto out;
        }
 
-       pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-       pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+       pr_info("IBS: LVT offset %d assigned\n", offset);
 
        return 0;
 out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
 static __init int amd_ibs_init(void)
 {
        u32 caps;
-       int ret;
+       int ret = -EINVAL;
 
        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */
 
-       if (!ibs_eilvt_valid()) {
-               ret = force_ibs_eilvt_setup();
-               if (ret) {
-                       pr_err("Failed to setup IBS, %d\n", ret);
-                       return ret;
-               }
-       }
+       /*
+        * Force LVT offset assignment for family 10h: The offsets are
+        * not assigned by the BIOS for this family, so the OS is
+        * responsible for doing it. If the OS assignment fails, fall
+        * back to BIOS settings and try to setup this.
+        */
+       if (boot_cpu_data.x86 == 0x10)
+               force_ibs_eilvt_setup();
+
+       if (!ibs_eilvt_valid())
+               goto out;
 
        get_online_cpus();
        ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
        smp_call_function(setup_APIC_ibs, NULL, 1);
        put_online_cpus();
 
-       return perf_event_ibs_init();
+       ret = perf_event_ibs_init();
+out:
+       if (ret)
+               pr_err("Failed to setup IBS, %d\n", ret);
+       return ret;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
index 2be5ebe9987209d41e76aede9e28ee9271070a34..121f1be4da19430426c45d1c17fce6d8d2d7037e 100644 (file)
@@ -1169,7 +1169,7 @@ again:
                 */
                c = &unconstrained;
        } else if (intel_try_alt_er(event, orig_idx)) {
-               raw_spin_unlock(&era->lock);
+               raw_spin_unlock_irqrestore(&era->lock, flags);
                goto again;
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static void intel_sandybridge_quirks(void)
+{
+       printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+       x86_pmu.pebs = 0;
+       x86_pmu.pebs_constraints = NULL;
+}
+
 __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
+               x86_pmu.quirks = intel_sandybridge_quirks;
        case 45: /* SandyBridge, "Romely-EP" */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
index c0d238f49db843cbd98bea7a7bc68b700fc40d7f..73da6b64f5b788ccbb83eef3317c32161b755fca 100644 (file)
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
+       int is_64bit = 0;
 
        /*
         * We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
                } else
                        kaddr = (void *)to;
 
-               kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+               is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+               insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);
index 492bf1358a7c388a9e252e8f031d6c6122e540d8..ef484d9d0a251b0128a164486096ce43c4ea8f4c 100644 (file)
@@ -1268,7 +1268,7 @@ reserve:
        }
 
 done:
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 static __initconst const struct x86_pmu p4_pmu = {
index 3b97a80ce32948ffa995f7b310024f901811703b..c99f9ed013d59985850702a617467b0bf0b556db 100644 (file)
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad EIP value.");
+                               printk(KERN_CONT " Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index 19853ad8afc5ff940cbf6806b6ae283695296cd7..6d728d9284bd0e3b94213206bd93af5afbfb7912 100644 (file)
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad RIP value.");
+                               printk(KERN_CONT " Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index b946a9eac7d9f29fb6d955e3541bf03295612367..1bb0bf4d92cd8edf9d639ea5aabd08bc7efad892 100644 (file)
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index acf8fbf8fbda1960de6cbd18eaf47a0a049eed8b..69bca468c47a8ffc22ea5811cb3bdf63caf0f44b 100644 (file)
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
 
+       if (user_mode_vm(regs))
+               return;
+
        WARN_ONCE(regs->sp >= curbase &&
                  regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp <  curbase + sizeof(struct thread_info) +
index c1a0188e29aef61d22706c8d0f2b99f843cf8fc7..44842d756b29fa9b2705fb8c57189fe5d28b5732 100644 (file)
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;
 
-       src = &get_cpu_var(hv_clock);
+       preempt_disable_notrace();
+       src = &__get_cpu_var(hv_clock);
        ret = pvclock_clocksource_read(src);
-       put_cpu_var(hv_clock);
+       preempt_enable_notrace();
        return ret;
 }
 
index f2d2a664e7975acace35742dc7bf3fe3446c2aeb..9d46f5e43b51f0dd2b02ad0575c1e90470df862a 100644 (file)
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
        return 0;
 }
 
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
 {
        misc_deregister(&microcode_dev);
 }
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
 
        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
-       if (IS_ERR(microcode_pdev)) {
-               microcode_dev_exit();
+       if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
-       }
 
        get_online_cpus();
        mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
-       if (error) {
-               platform_device_unregister(microcode_pdev);
-               return error;
-       }
+       if (error)
+               goto out_pdev;
 
        error = microcode_dev_init();
        if (error)
-               return error;
+               goto out_sysdev_driver;
 
        register_syscore_ops(&mc_syscore_ops);
        register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
                " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
 
        return 0;
+
+out_sysdev_driver:
+       get_online_cpus();
+       mutex_lock(&microcode_mutex);
+
+       sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
+
+out_pdev:
+       platform_device_unregister(microcode_pdev);
+       return error;
+
 }
 module_init(microcode_init);
 
index 9103b89c145a534215824a9b2a7d80aa9e112527..0741b062a3048a6e2b1b5bd0eb4edbbf3d5bb9cf 100644 (file)
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
        }
 #endif
 
+       set_bit(m->busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-               set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
index b9c8628974af3ae4eeb7cc73891140618df589b2..e88f37b58dddeeaecfb8951649db799986b7e247 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/traps.h>
 #include <asm/mach_traps.h>
 #include <asm/nmi.h>
+#include <asm/x86_init.h>
 
 #define NMI_MAX_NAMELEN        16
 struct nmiaction {
@@ -348,7 +349,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 
        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
-       reason = get_nmi_reason();
+       reason = x86_platform.get_nmi_reason();
 
        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
index b9b3b1a51643931a0405a1c0f9cc5b62eec43b1f..ee5d4fbd53b4bac72d19e3aa3000e77467f0c25e 100644 (file)
@@ -403,6 +403,14 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
+bool set_pm_idle_to_default(void)
+{
+       bool ret = !!pm_idle;
+
+       pm_idle = default_idle;
+
+       return ret;
+}
 void stop_this_cpu(void *dummy)
 {
        local_irq_disable();
index b78643d0f9a53d8b2050a14f7e8185b540ed8595..03920a15a632289605c6f0ba0b563d64065214a7 100644 (file)
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+                       quirk_amd_nb_node);
+
 #endif
index e334be1182b9f0b3666464f8ff9f13b22d3ef1dc..37a458b521a6020598b69305c782a0f28bd75b27 100644 (file)
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
  */
 
 /*
- * Some machines require the "reboot=b"  commandline option,
+ * Some machines require the "reboot=b" or "reboot=k"  commandline options,
  * this quirk makes that automatic.
  */
 static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_KBD) {
+               reboot_type = BOOT_KBD;
+               printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 static struct dmi_system_id __initdata reboot_dmi_table[] = {
        {       /* Handle problems with rebooting on Dell E520's */
                .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
        { /* Handle reboot issue on Acer Aspire one */
-               .callback = set_bios_reboot,
+               .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
                },
        },
+       {       /* Handle problems with rebooting on the OptiPlex 990. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+               },
+       },
        { }
 };
 
index 348ce016a835c291deeae528fb327ea8f93ffbdb..af6db6ec5b2a20db3d13861bdb97b878f8371f1e 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
 #include <asm/time.h>
+#include <asm/mrst.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
        if (of_have_populated_dt())
                return 0;
 
+       /* Intel MID platforms don't have ioport rtc */
+       if (mrst_identify_cpu())
+               return -ENODEV;
+
        platform_device_register(&rtc_device);
        dev_info(&rtc_device.dev,
                 "registered platform RTC device (no PNP device found)\n");
index afaf38447ef5fc42c53e78168492fa9eb89a032b..cf0ef986cb6dff51348c17c691491f6f48c61a60 100644 (file)
@@ -1045,6 +1045,8 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.timers.wallclock_init();
 
+       x86_platform.wallclock_init();
+
        mcheck_init();
 
        arch_init_ideal_nops();
index 6f164bd5e14d167d417b2f86e5fc8580047ed371..c1d6cd549397ad54dbcb32526c9170e1f590a759 100644 (file)
 #include <asm/pat.h>
 #include <asm/tsc.h>
 #include <asm/iommu.h>
+#include <asm/mach_traps.h>
 
 void __cpuinit x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 void __init x86_init_pgd_noop(pgd_t *unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
+void wallclock_init_noop(void) { }
 
 /*
  * The platform setup functions are preset with the default functions
@@ -97,11 +99,13 @@ static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
        .calibrate_tsc                  = native_calibrate_tsc,
+       .wallclock_init                 = wallclock_init_noop,
        .get_wallclock                  = mach_get_cmos_time,
        .set_wallclock                  = mach_set_rtc_mmss,
        .iommu_shutdown                 = iommu_shutdown_noop,
        .is_untracked_pat_range         = is_ISA_range,
        .nmi_init                       = default_nmi_init,
+       .get_nmi_reason                 = default_get_nmi_reason,
        .i8042_detect                   = default_i8042_detect
 };
 
index 76e3f1cd03696997964db7814de98a578a9eb4a7..405f2620392f5e32d393166520e707b2d039b3bb 100644 (file)
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
                return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
+       struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
+       if (!irqchip_in_kernel(kvm))
+               return;
+
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
        pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
         /* FIXME: enhance mode 4 precision */
        case 4:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-                       create_pit_timer(ps, val, 0);
+                       create_pit_timer(kvm, val, 0);
                }
                break;
        case 2:
        case 3:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-                       create_pit_timer(ps, val, 1);
+                       create_pit_timer(kvm, val, 1);
                }
                break;
        default:
index a0d6bd9ad442f1746bfa66eaabe86641470b7d0f..579a0b51696ac560b1768ae445bb8f2084172f8c 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+               unsigned long exit)
+{
+       vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+       vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
-       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-               vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-               vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-               return;
+       switch (msr) {
+       case MSR_EFER:
+               if (cpu_has_load_ia32_efer) {
+                       clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                       VM_EXIT_LOAD_IA32_EFER);
+                       return;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (cpu_has_load_perf_global_ctrl) {
+                       clear_atomic_switch_msr_special(
+                                       VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+                       return;
+               }
+               break;
        }
 
        for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+               unsigned long exit, unsigned long guest_val_vmcs,
+               unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+       vmcs_write64(guest_val_vmcs, guest_val);
+       vmcs_write64(host_val_vmcs, host_val);
+       vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+       vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                  u64 guest_val, u64 host_val)
 {
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
-       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-               vmcs_write64(GUEST_IA32_EFER, guest_val);
-               vmcs_write64(HOST_IA32_EFER, host_val);
-               vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-               vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-               return;
+       switch (msr) {
+       case MSR_EFER:
+               if (cpu_has_load_ia32_efer) {
+                       add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                       VM_EXIT_LOAD_IA32_EFER,
+                                       GUEST_IA32_EFER,
+                                       HOST_IA32_EFER,
+                                       guest_val, host_val);
+                       return;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (cpu_has_load_perf_global_ctrl) {
+                       add_atomic_switch_msr_special(
+                                       VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       GUEST_IA32_PERF_GLOBAL_CTRL,
+                                       HOST_IA32_PERF_GLOBAL_CTRL,
+                                       guest_val, host_val);
+                       return;
+               }
+               break;
        }
 
        for (i = 0; i < m->nr; ++i)
                if (m->guest[i].index == msr)
                        break;
 
-       if (i == m->nr) {
+       if (i == NR_AUTOLOAD_MSRS) {
+               printk_once(KERN_WARNING"Not enough mst switch entries. "
+                               "Can't add msr %x\n", msr);
+               return;
+       } else if (i == m->nr) {
                ++m->nr;
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
                                   VM_EXIT_LOAD_IA32_EFER);
 
+       cpu_has_load_perf_global_ctrl =
+               allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+                               VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+               && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+                                  VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+       /*
+        * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+        * but due to errata below it can't be used. Workaround is to use
+        * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+        *
+        * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+        *
+        * AAK155             (model 26)
+        * AAP115             (model 30)
+        * AAT100             (model 37)
+        * BC86,AAY89,BD102   (model 44)
+        * BA97               (model 46)
+        *
+        */
+       if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+               switch (boot_cpu_data.x86_model) {
+               case 26:
+               case 30:
+               case 37:
+               case 44:
+               case 46:
+                       cpu_has_load_perf_global_ctrl = false;
+                       printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+                                       "does not work properly. Using workaround\n");
+                       break;
+               default:
+                       break;
+               }
+       }
+
        return 0;
 }
 
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+       int i, nr_msrs;
+       struct perf_guest_switch_msr *msrs;
+
+       msrs = perf_guest_get_msrs(&nr_msrs);
+
+       if (!msrs)
+               return;
+
+       for (i = 0; i < nr_msrs; i++)
+               if (msrs[i].host == msrs[i].guest)
+                       clear_atomic_switch_msr(vmx, msrs[i].msr);
+               else
+                       add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+                                       msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       atomic_switch_perf_msrs(vmx);
+
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
                /* Store host registers */
index c38efd7b792eec9aab414892c23818446ad3fad0..4c938da2ba00b40dc430d5a7854b0ee62b6ce631 100644 (file)
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;
-       u32 timer_mode_mask;
 
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-               best->function == 0x1) {
-               best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
-               timer_mode_mask = 3 << 17;
-       } else
-               timer_mode_mask = 1 << 17;
-
-       if (apic)
-               apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+       if (apic) {
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
 }
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_TSC_CONTROL:
                r = kvm_has_tsc_control;
                break;
+       case KVM_CAP_TSC_DEADLINE_TIMER:
+               r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+               break;
        default:
                r = 0;
                break;
index ea305856151cefc62fccd7f216519bc9d5f2945c..dd74e46828c0fc243740b61a18c2dea654fafb5e 100644 (file)
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index b49962662101a0cf7361f0035e1b017333efc22a..f4f29b19fac5f2cc7c46023ef86c02a66b137e8e 100644 (file)
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index bfab3fa10edc63e50b4184d00e75566678242a0d..7b65f752c5f8fd79af2c6b4afb342988bdd8d56c 100644 (file)
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
index cdfe4c54decac05e4943a00e27803f78898b6419..f148cf65267836d66e1fa666d612dca5669950c3 100644 (file)
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
+static int nmi_timer;
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 #ifdef CONFIG_X86_LOCAL_APIC
        ret = op_nmi_init(ops);
 #endif
+       nmi_timer = (ret != 0);
 #ifdef CONFIG_X86_IO_APIC
-       if (ret < 0)
+       if (nmi_timer)
                ret = op_nmi_timer_init(ops);
 #endif
        ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 void oprofile_arch_exit(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-       op_nmi_exit();
+       if (!nmi_timer)
+               op_nmi_exit();
 #endif
 }
index e36bf714cb7727352c237d6bffa0d838d2c1c08e..40e446941dd7eceb587b2b4572c96279631e5f56 100644 (file)
  */
 
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 void efi_call_phys_prelog(void)
 {
-       unsigned long cr4;
-       unsigned long temp;
        struct desc_ptr gdt_descr;
 
        local_irq_save(efi_rt_eflags);
 
-       /*
-        * If I don't have PAE, I should just duplicate two entries in page
-        * directory. If I have PAE, I just need to duplicate one entry in
-        * page directory.
-        */
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               swapper_pg_dir[0].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-       } else {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               efi_bak_pg_dir_pointer[1].pgd =
-                   swapper_pg_dir[pgd_index(0x400000)].pgd;
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-               temp = PAGE_OFFSET + 0x400000;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   swapper_pg_dir[pgd_index(temp)].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(initial_page_table);
        __flush_tlb_all();
 
        gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
 
 void efi_call_phys_epilog(void)
 {
-       unsigned long cr4;
        struct desc_ptr gdt_descr;
 
        gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-       } else {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   efi_bak_pg_dir_pointer[1].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
        local_irq_restore(efi_rt_eflags);
index 541020df0da6713f485b313829def31dab03b334..ad4ec1cb097ecfae17e4a99aed37cf985b03cb78 100644 (file)
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
 int sfi_mrtc_num;
 
+static void mrst_power_off(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+       else
+               intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
 /* parse all the mtimer info to a static mtimer array */
 static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 {
@@ -187,11 +201,34 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
 static unsigned long __init mrst_calibrate_tsc(void)
 {
        unsigned long flags, fast_calibrate;
-
-       local_irq_save(flags);
-       fast_calibrate = apbt_quick_calibrate();
-       local_irq_restore(flags);
-
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_PENWELL) {
+               u32 lo, hi, ratio, fsb;
+
+               rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+               pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
+               ratio = (hi >> 8) & 0x1f;
+               pr_debug("ratio is %d\n", ratio);
+               if (!ratio) {
+                       pr_err("read a zero ratio, should be incorrect!\n");
+                       pr_err("force tsc ratio to 16 ...\n");
+                       ratio = 16;
+               }
+               rdmsr(MSR_FSB_FREQ, lo, hi);
+               if ((lo & 0x7) == 0x7)
+                       fsb = PENWELL_FSB_FREQ_83SKU;
+               else
+                       fsb = PENWELL_FSB_FREQ_100SKU;
+               fast_calibrate = ratio * fsb;
+               pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+               lapic_timer_frequency = fsb * 1000 / HZ;
+               /* mark tsc clocksource as reliable */
+               set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+       } else {
+               local_irq_save(flags);
+               fast_calibrate = apbt_quick_calibrate();
+               local_irq_restore(flags);
+       }
+       
        if (fast_calibrate)
                return fast_calibrate;
 
@@ -242,15 +279,15 @@ static int mrst_i8042_detect(void)
        return 0;
 }
 
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
+/*
+ * Moorestown does not have external NMI source nor port 0x61 to report
+ * NMI status. The possible NMI sources are from pmu as a result of NMI
+ * watchdog or lock debug. Reading io port 0x61 results in 0xff, which
+ * misleads the NMI handler.
+ */
+static unsigned char mrst_get_nmi_reason(void)
 {
-       intel_scu_ipc_simple_command(0xf1, 0);
+       return 0;
 }
 
 /*
@@ -274,6 +311,8 @@ void __init x86_mrst_early_setup(void)
        x86_platform.calibrate_tsc = mrst_calibrate_tsc;
        x86_platform.i8042_detect = mrst_i8042_detect;
        x86_init.timers.wallclock_init = mrst_rtc_init;
+       x86_platform.get_nmi_reason = mrst_get_nmi_reason;
+
        x86_init.pci.init = pci_mrst_init;
        x86_init.pci.fixup_irqs = x86_init_noop;
 
@@ -448,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
        return max7315;
 }
 
+static void *tca6416_platform_data(void *info)
+{
+       static struct pca953x_platform_data tca6416;
+       struct i2c_board_info *i2c_info = info;
+       int gpio_base, intr;
+       char base_pin_name[SFI_NAME_LEN + 1];
+       char intr_pin_name[SFI_NAME_LEN + 1];
+
+       strcpy(i2c_info->type, "tca6416");
+       strcpy(base_pin_name, "tca6416_base");
+       strcpy(intr_pin_name, "tca6416_int");
+
+       gpio_base = get_gpio_by_name(base_pin_name);
+       intr = get_gpio_by_name(intr_pin_name);
+
+       if (gpio_base == -1)
+               return NULL;
+       tca6416.gpio_base = gpio_base;
+       if (intr != -1) {
+               i2c_info->irq = intr + MRST_IRQ_OFFSET;
+               tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+       } else {
+               i2c_info->irq = -1;
+               tca6416.irq_base = -1;
+       }
+       return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+       struct i2c_board_info *i2c_info = info;
+       int intr = get_gpio_by_name("mpu3050_int");
+
+       if (intr == -1)
+               return NULL;
+
+       i2c_info->irq = intr + MRST_IRQ_OFFSET;
+       return NULL;
+}
+
 static void __init *emc1403_platform_data(void *info)
 {
        static short intr2nd_pdata;
@@ -610,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
 static const struct devs_id __initconst device_ids[] = {
        {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
        {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+       {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
        {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
        {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
        {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+       {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
        {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
        {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
        {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+       {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
 
        /* MSIC subdevices */
        {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
index 118c143a9cb49ca03ced909f1a0744902889d701..2c32df6fe23167a6ca3955fdac07444f6f2fbc34 100644 (file)
@@ -11,7 +11,7 @@
 #endif
 
 #define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
-#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_IP)
+#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
 #define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
 
 #define ARCH_IS_STACKGROW(address) \
index da8afd576a6b7d161e3e81f2997912a73fec3d31..1f928659c338e6e5cfcf6386446f087f41f1e22f 100644 (file)
@@ -1356,7 +1356,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
        int cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
-               per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+               xen_vcpu_setup(cpu);
                if (xen_have_vector_callback)
                        xen_init_lock_cpu(cpu);
                break;
@@ -1386,7 +1386,6 @@ static void __init xen_hvm_guest_init(void)
        xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
        xen_unplug_emulated_devices();
-       have_vcpu_info_placement = 0;
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
index 6bbfd7ac5e814b10c96b925407c18cfaabb32453..5a40d24ba3316b85b42e5ef5d00280b89a2033b9 100644 (file)
@@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 
        if (shared == NULL) {
                struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes);
+                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
                BUG_ON(area == NULL);
                shared = area->addr;
                *__shared = shared;
index 38d0af4fefec19f52d5e724c8f08102d391dc2e6..b2c7179fa26343d5cedfb7846d2f1b92ceb97390 100644 (file)
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
        domid_t domid = DOMID_SELF;
        int ret;
 
-       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-       if (ret > 0)
-               max_pages = ret;
+       /*
+        * For the initial domain we use the maximum reservation as
+        * the maximum page.
+        *
+        * For guest domains the current maximum reservation reflects
+        * the current maximum rather than the static maximum. In this
+        * case the e820 map provided to us will cover the static
+        * maximum region.
+        */
+       if (xen_initial_domain()) {
+               ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+               if (ret > 0)
+                       max_pages = ret;
+       }
+
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
@@ -410,6 +422,6 @@ void __init xen_arch_setup(void)
 #endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;
-
+       WARN_ON(set_pm_idle_to_default());
        fiddle_vdso();
 }
index f43c8a5840ae488131dbf395d5eaf96e2fea876d..15de223c7f9371a9da852825ea8857789d94ae70 100644 (file)
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all)
                        blk_throtl_drain(q);
 
-               __blk_run_queue(q);
+               /*
+                * This function might be called on a queue which failed
+                * driver init after queue creation.  Some drivers
+                * (e.g. fd) get unhappy in such cases.  Kick queue iff
+                * dispatch queue has something on it.
+                */
+               if (!list_empty(&q->queue_head))
+                       __blk_run_queue(q);
 
                if (drain_all)
                        nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
@@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
@@ -1379,15 +1378,19 @@ get_rq:
                 */
                if (list_empty(&plug->list))
                        trace_block_plug(q);
-               else if (!plug->should_sort) {
-                       struct request *__rq;
+               else {
+                       if (!plug->should_sort) {
+                               struct request *__rq;
 
-                       __rq = list_entry_rq(plug->list.prev);
-                       if (__rq->q != q)
-                               plug->should_sort = 1;
+                               __rq = list_entry_rq(plug->list.prev);
+                               if (__rq->q != q)
+                                       plug->should_sort = 1;
+                       }
+                       if (request_count >= BLK_MAX_REQUEST_COUNT) {
+                               blk_flush_plug_list(plug, false);
+                               trace_block_plug(q);
+                       }
                }
-               if (request_count >= BLK_MAX_REQUEST_COUNT)
-                       blk_flush_plug_list(plug, false);
                list_add_tail(&req->queuelist, &plug->list);
                drive_stat_acct(req, 1);
        } else {
index e663ac2d8e68f70ff17ce274f3cebec16c1dd18c..623e1cd4cffe997e71fbb54577bd42f220af64b3 100644 (file)
@@ -204,10 +204,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                if (!iov[i].iov_len)
                        return -EINVAL;
 
-               if (uaddr & queue_dma_alignment(q)) {
+               /*
+                * Keep going so we check length of all segments
+                */
+               if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
-                       break;
-               }
        }
 
        if (unaligned || (q->dma_pad_mask & len) || map_data)
@@ -310,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (rq_data_dir(rq) == WRITE)
+       if (!reading)
                bio->bi_rw |= REQ_WRITE;
 
        if (do_copy)
index e74d6d13838f3ae9f717911a38d21885623029c7..4af6f5cc1167a65494dc52b39cd3f1dfd1271087 100644 (file)
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
-       int tag = rq->tag;
+       unsigned tag = rq->tag; /* negative tags invalid */
 
-       BUG_ON(tag == -1);
-
-       if (unlikely(tag >= bqt->max_depth)) {
-               /*
-                * This can happen after tag depth has been reduced.
-                * But tag shouldn't be larger than real_max_depth.
-                */
-               WARN_ON(tag >= bqt->real_max_depth);
-               return;
-       }
+       BUG_ON(tag >= bqt->real_max_depth);
 
        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
index 16ace89613bc6e4ce343cdd2f1da85218b7dc5e7..3548705b04e482a4405097217011256105c4bdca 100644 (file)
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
        /*
         * reposition in fifo if next is older than rq
         */
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        cfq_remove_request(next);
        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(next), rq_is_sync(next));
+
+       cfqq = RQ_CFQQ(next);
+       /*
+        * all requests of this queue are merged to other queues, delete it
+        * from the service tree. If it's the active_queue,
+        * cfq_dispatch_requests() will choose to expire it or do idle
+        */
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+           cfqq != cfqd->active_queue)
+               cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -3184,7 +3196,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                }
        }
 
-       if (ret)
+       if (ret && ret != -EEXIST)
                printk(KERN_ERR "cfq: cic link failed!\n");
 
        return ret;
@@ -3200,6 +3212,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
+       int ret;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3207,6 +3220,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (!ioc)
                return NULL;
 
+retry:
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;
@@ -3215,7 +3229,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic == NULL)
                goto err;
 
-       if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+       ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+       if (ret == -EEXIST) {
+               /* someone has linked cic to ioc already */
+               cfq_cic_free(cic);
+               goto retry;
+       } else if (ret)
                goto err_free;
 
 out:
@@ -4036,6 +4055,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
        if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
                kfree(cfqg);
+
+               spin_lock(&cic_index_lock);
+               ida_remove(&cic_index_ida, cfqd->cic_index);
+               spin_unlock(&cic_index_lock);
+
                kfree(cfqd);
                return NULL;
        }
index 9253839714ff95b4acc6da413bd87f610ddce44c..02e9fca808256f762e0af44e8a1055c322b51b40 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/mutex.h>
 #include <linux/idr.h>
 #include <linux/log2.h>
-#include <linux/ctype.h>
 
 #include "blk.h"
 
@@ -916,74 +915,6 @@ static int __init genhd_device_init(void)
 
 subsys_initcall(genhd_device_init);
 
-static ssize_t alias_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct gendisk *disk = dev_to_disk(dev);
-       ssize_t ret = 0;
-
-       if (disk->alias)
-               ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
-       return ret;
-}
-
-static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct gendisk *disk = dev_to_disk(dev);
-       char *alias;
-       char *envp[] = { NULL, NULL };
-       unsigned char c;
-       int i;
-       ssize_t ret = count;
-
-       if (!count)
-               return -EINVAL;
-
-       if (count >= ALIAS_LEN) {
-               printk(KERN_ERR "alias: alias is too long\n");
-               return -EINVAL;
-       }
-
-       /* Validation check */
-       for (i = 0; i < count; i++) {
-               c = buf[i];
-               if (i == count - 1 && c == '\n')
-                       break;
-               if (!isalnum(c) && c != '_' && c != '-') {
-                       printk(KERN_ERR "alias: invalid alias\n");
-                       return -EINVAL;
-               }
-       }
-
-       if (disk->alias) {
-               printk(KERN_INFO "alias: %s is already assigned (%s)\n",
-                      disk->disk_name, disk->alias);
-               return -EINVAL;
-       }
-
-       alias = kasprintf(GFP_KERNEL, "%s", buf);
-       if (!alias)
-               return -ENOMEM;
-
-       if (alias[count - 1] == '\n')
-               alias[count - 1] = '\0';
-
-       envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
-       if (!envp[0]) {
-               kfree(alias);
-               return -ENOMEM;
-       }
-
-       disk->alias = alias;
-       printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
-
-       kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
-
-       kfree(envp[0]);
-       return ret;
-}
-
 static ssize_t disk_range_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
@@ -1043,7 +974,6 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
        return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
-static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
@@ -1066,7 +996,6 @@ static struct device_attribute dev_attr_fail_timeout =
 #endif
 
 static struct attribute *disk_attrs[] = {
-       &dev_attr_alias.attr,
        &dev_attr_range.attr,
        &dev_attr_ext_range.attr,
        &dev_attr_removable.attr,
index 127408069ca7fa745953fd5a8e164cd74779c680..631b9477b99c02f827103aa00a8d3a83c380d359 100644 (file)
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
 static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-                          struct timespec *time, struct pstore_info *psi);
+                          struct timespec *time, char **buf,
+                          struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
                       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
 }
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-                          struct timespec *time, struct pstore_info *psi)
+                          struct timespec *time, char **buf,
+                          struct pstore_info *psi)
 {
        int rc;
        ssize_t len = 0;
        u64 record_id;
-       struct cper_pstore_record *rcd = (struct cper_pstore_record *)
-                                       (erst_info.buf - sizeof(*rcd));
+       struct cper_pstore_record *rcd;
+       size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
 
        if (erst_disable)
                return -ENODEV;
 
+       rcd = kmalloc(rcd_len, GFP_KERNEL);
+       if (!rcd) {
+               rc = -ENOMEM;
+               goto out;
+       }
 skip:
        rc = erst_get_record_id_next(&reader_pos, &record_id);
        if (rc)
@@ -1004,22 +1011,27 @@ skip:
 
        /* no more record */
        if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-               rc = -1;
+               rc = -EINVAL;
                goto out;
        }
 
-       len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
-                       erst_info.bufsize);
+       len = erst_read(record_id, &rcd->hdr, rcd_len);
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < 0) {
-               rc = -1;
+       else if (len < sizeof(*rcd)) {
+               rc = -EIO;
                goto out;
        }
        if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
                goto skip;
 
+       *buf = kmalloc(len, GFP_KERNEL);
+       if (*buf == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       memcpy(*buf, rcd->data, len - sizeof(*rcd));
        *id = record_id;
        if (uuid_le_cmp(rcd->sec_hdr.section_type,
                        CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
        time->tv_nsec = 0;
 
 out:
+       kfree(rcd);
        return (rc < 0) ? rc : (len - sizeof(*rcd));
 }
 
index 6bdedd7cca2cd3cd630370732b71b8b530992ca9..cf047c406d92797777d6255122f23b5a8297c58a 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index ec555951176eddb717de85d010dde532151a3127..43b875810d1b7b91c98386fb6f3a5c58be79fdb9 100644 (file)
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
        const struct platform_device_id *id = platform_get_device_id(pdev);
-       struct ata_port_info pi = ahci_port_info[id->driver_data];
+       struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ahci_host_priv *hpriv;
        struct ata_host *host;
index 63d53277d6a92f4393cd22563d75c4b9b8d32462..4cadfa28f940450ee2f5a890af44e34ad583e921 100644 (file)
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
        if (rc)
                goto out;
 
+#ifdef CONFIG_ATA_BMDMA
        if (bmdma)
                /* prepare and activate BMDMA host */
                rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
        else
+#endif
                /* prepare and activate SFF host */
                rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
        if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
        host->private_data = host_priv;
        host->flags |= hflags;
 
+#ifdef CONFIG_ATA_BMDMA
        if (bmdma) {
                pci_set_master(pdev);
                rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
        } else
+#endif
                rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
 out:
        if (rc == 0)
index 3d0c2b0fed9cefcdd724487a9e9af00af1fd61d4..9e373ba203082f8bc0cb5757a011a3efccf7b291 100644 (file)
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
           if (ia_vcc == NULL)
           {
              atomic_inc(&vcc->stats->rx_err);
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
            }
           // get real pkt length  pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
              atomic_inc(&vcc->stats->rx_err);
              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
                                                             length, skb->len);)
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
           }
           skb_trim(skb, length);
index 82c865452c7080d180ec6a453ab1efb3e988dac3..919daa7cd5b1db3443c65863cc07c20c830f680b 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -1743,6 +1744,10 @@ void device_shutdown(void)
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
 
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
+
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
                        dev->bus->shutdown(dev);
index 793f796c4da3e1cd20143e1266d41246f2289b73..5693ecee9a4052a339b9eca105ff3704fffe29b4 100644 (file)
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_WRITEBACK)),
                       nid, K(node_page_state(nid, NR_FILE_PAGES)),
                       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-                      nid, K(node_page_state(nid, NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                      nid, K(node_page_state(nid, NR_ANON_PAGES)
                        + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-                       HPAGE_PMD_NR
+                       HPAGE_PMD_NR),
+#else
+                      nid, K(node_page_state(nid, NR_ANON_PAGES)),
 #endif
-                      ),
                       nid, K(node_page_state(nid, NR_SHMEM)),
                       nid, node_page_state(nid, NR_KERNEL_STACK) *
                                THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
                                node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
                        , nid,
                        K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-                       HPAGE_PMD_NR)
+                       HPAGE_PMD_NR));
+#else
+                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
-                      );
        n += hugetlb_report_node_meminfo(nid, buf + n);
        return n;
 }
index 5f0f85d5c5765558a25fa7ec165be968e5fbcabf..428e55e012dcd06f91ade3154c941aab6f1d5bb5 100644 (file)
@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
 
        list_for_each_entry_reverse(ce, &psd->clock_list, node) {
                if (ce->status < PCE_STATUS_ERROR) {
-                       clk_disable(ce->clk);
+                       if (ce->status == PCE_STATUS_ENABLED)
+                               clk_disable(ce->clk);
                        ce->status = PCE_STATUS_ACQUIRED;
                }
        }
index 7fa098464dae62921a5acf32af3537f649b604fe..c3d2dfcf438dd1735fcf401be9f8185440e9a1a9 100644 (file)
@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  End:
        if (!error) {
                dev->power.is_suspended = true;
-               if (dev->power.wakeup_path && dev->parent)
+               if (dev->power.wakeup_path
+                   && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }
 
index 30a94eadc200c7f89f7e2448126ad93cd6a6a2f2..86de6c50fc4181f796c5d0e3d20e796380f112b0 100644 (file)
@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
        if (!dev || !req) /*guard against callers passing in null */
                return -EINVAL;
 
-       if (dev_pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
-                       "added request\n");
+       if (WARN(dev_pm_qos_request_active(req),
+                "%s() called for already added request\n", __func__))
                return -EINVAL;
-       }
 
        req->dev = dev;
 
@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
        if (!req) /*guard against callers passing in null */
                return -EINVAL;
 
-       if (!dev_pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
-                       "unknown object\n");
+       if (WARN(!dev_pm_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
                return -EINVAL;
-       }
 
        mutex_lock(&dev_pm_qos_mtx);
 
@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
        if (!req) /*guard against callers passing in null */
                return -EINVAL;
 
-       if (!dev_pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
-                       "unknown object\n");
+       if (WARN(!dev_pm_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
                return -EINVAL;
-       }
 
        mutex_lock(&dev_pm_qos_mtx);
 
index 486f94ef24d499bfe0d17eb379fe5043a94f36a5..587cce57adae23fcddb22270de69ae9ce2bff76b 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -2600,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+                       c->Request.CDB[7] = (size >> 8) & 0xFF;
+                       c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
@@ -4319,6 +4322,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
                dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
                return -ENODEV;
        }
+
+       pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+                               PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
        err = pci_enable_device(h->pdev);
        if (err) {
                dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -4875,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
        if (h->msix_vector || h->msi_vector) {
                if (!request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h))
+                               0, h->devname, h))
                        return 0;
                dev_err(&h->pdev->dev, "Unable to get msi irq %d"
                        " for %s\n", h->intr[h->intr_mode],
@@ -4884,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
        }
 
        if (!request_irq(h->intr[h->intr_mode], intxhandler,
-                       IRQF_DISABLED, h->devname, h))
+                       IRQF_SHARED, h->devname, h))
                return 0;
        dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
                h->intr[h->intr_mode], h->devname);
@@ -5158,6 +5165,7 @@ reinit_after_soft_reset:
        h->cciss_max_sectors = 8192;
 
        rebuild_lun_table(h, 1, 0);
+       cciss_engage_scsi(h);
        h->busy_initializing = 0;
        return 1;
 
index 951a4e33b92b788b208d6320636b77422bf9474f..e820b68d2f6cd4d74382b85b5e020069294f00f3 100644 (file)
@@ -1720,5 +1720,6 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
 /* If no tape support, then these become defined out of existence */
 
 #define cciss_scsi_setup(cntl_num)
+#define cciss_engage_scsi(h)
 
 #endif /* CONFIG_CISS_SCSI_TAPE */
index 3d806820280e3bc4aaa5e81d6bea411f6597e400..1e888c9e85b3dd10a42238a6588dd4f0b2f7c9e1 100644 (file)
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
        &xor_funcs
 };
 
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 {
-       loff_t size, offset, loopsize;
+       loff_t size, loopsize;
 
        /* Compute loopsize in bytes */
        size = i_size_read(file->f_mapping->host);
-       offset = lo->lo_offset;
        loopsize = size - offset;
-       if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
-               loopsize = lo->lo_sizelimit;
+       /* offset is beyond i_size, wierd but possible */
+       if (loopsize < 0)
+               return 0;
 
+       if (sizelimit > 0 && sizelimit < loopsize)
+               loopsize = sizelimit;
        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
        return loopsize >> 9;
 }
 
+static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+{
+       return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+}
+
 static int
-figure_loop_size(struct loop_device *lo)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
-       loff_t size = get_loop_size(lo, lo->lo_backing_file);
+       loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
 
        if (unlikely((loff_t)x != size))
                return -EFBIG;
-
+       if (lo->lo_offset != offset)
+               lo->lo_offset = offset;
+       if (lo->lo_sizelimit != sizelimit)
+               lo->lo_sizelimit = sizelimit;
        set_capacity(lo->lo_disk, x);
-       return 0;                                       
+       return 0;
 }
 
 static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
 
        if (retval < 0)
                return retval;
-
+       if (retval != bvec->bv_len)
+               return -EIO;
        return 0;
 }
 
@@ -411,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
                /*
                 * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do support discard if
+                * image a.k.a. discard. However we do not support discard if
                 * encryption is enabled, because it may give an attacker
                 * useful information.
                 */
@@ -786,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
        }
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
-       q->limits.discard_alignment = inode->i_sb->s_blocksize;
+       q->limits.discard_alignment = 0;
        q->limits.max_discard_sectors = UINT_MAX >> 9;
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 
        if (lo->lo_offset != info->lo_offset ||
            lo->lo_sizelimit != info->lo_sizelimit) {
-               lo->lo_offset = info->lo_offset;
-               lo->lo_sizelimit = info->lo_sizelimit;
-               if (figure_loop_size(lo))
+               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
                        return -EFBIG;
        }
        loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
        err = -ENXIO;
        if (unlikely(lo->lo_state != Lo_bound))
                goto out;
-       err = figure_loop_size(lo);
+       err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
        if (unlikely(err))
                goto out;
        sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
                        goto out_unlocked;
                break;
        case LOOP_SET_STATUS:
-               err = loop_set_status_old(lo, (struct loop_info __user *) arg);
+               err = -EPERM;
+               if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+                       err = loop_set_status_old(lo,
+                                       (struct loop_info __user *)arg);
                break;
        case LOOP_GET_STATUS:
                err = loop_get_status_old(lo, (struct loop_info __user *) arg);
                break;
        case LOOP_SET_STATUS64:
-               err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
+               err = -EPERM;
+               if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+                       err = loop_set_status64(lo,
+                                       (struct loop_info64 __user *) arg);
                break;
        case LOOP_GET_STATUS64:
                err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
index 6b9a2000d56affdd7788ff26581ae587f3937f96..a79fb4f7ff622632809c2b93f47df93fb7561ccb 100644 (file)
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
                if (dev->status & 0x10)
                        return -ETIME;
 
+       memset(&hdr, 0, sizeof(hdr));
        hdr.magic = PG_MAGIC;
        hdr.dlen = dev->dlen;
        copy = 0;
index 65cc424359b05e735703b1b9a74277cb1f1359a9..148ab944378d57bdaec596e96bed6dc131777a28 100644 (file)
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);      /* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        u32 snap_count = le32_to_cpu(ondisk->snap_count);
        int ret = -ENOMEM;
 
+       if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+               return -ENXIO;
+       }
+
        init_rwsem(&header->snap_rwsem);
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
        return ret;
 }
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-                                    u64 snapid,
-                                    const char *obj)
-{
-       struct ceph_osd_req_op *ops;
-       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-       if (ret < 0)
-               return ret;
-
-       ops[0].snap.snapid = snapid;
-
-       ret = rbd_req_sync_op(dev, NULL,
-                              CEPH_NOSNAP,
-                              0,
-                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                              ops,
-                              1, obj, 0, 0, NULL, NULL, NULL);
-
-       rbd_destroy_ops(ops);
-
-       return ret;
-}
-
 /*
  * Request sync osd read
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
                        goto out_dh;
 
                rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (rc == -ENXIO) {
+                               pr_warning("unrecognized header format"
+                                          " for image %s", rbd_dev->obj);
+                       }
                        goto out_dh;
+               }
 
                if (snap_count != header->total_snaps) {
                        snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
        &dev_attr_current_snap.attr,
        &dev_attr_refresh.attr,
        &dev_attr_create_snap.attr,
-       &dev_attr_rollback_snap.attr,
        NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
        return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t count)
-{
-       struct rbd_device *rbd_dev = dev_to_rbd(dev);
-       int ret;
-       u64 snapid;
-       u64 cur_ofs;
-       char *seg_name = NULL;
-       char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (!snap_name)
-               return ret;
-
-       /* parse snaps add command */
-       snprintf(snap_name, count, "%s", buf);
-       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-       if (!seg_name)
-               goto done;
-
-       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-       ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-       if (ret < 0)
-               goto done_unlock;
-
-       dout("snapid=%lld\n", snapid);
-
-       cur_ofs = 0;
-       while (cur_ofs < rbd_dev->header.image_size) {
-               cur_ofs += rbd_get_segment(&rbd_dev->header,
-                                          rbd_dev->obj,
-                                          cur_ofs, (u64)-1,
-                                          seg_name, NULL);
-               dout("seg_name=%s\n", seg_name);
-
-               ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-               if (ret < 0)
-                       pr_warning("could not roll back obj %s err=%d\n",
-                                  seg_name, ret);
-       }
-
-       ret = __rbd_update_snaps(rbd_dev);
-       if (ret < 0)
-               goto done_unlock;
-
-       ret = count;
-
-done_unlock:
-       mutex_unlock(&ctl_mutex);
-done:
-       kfree(seg_name);
-       kfree(snap_name);
-
-       return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
index ae3e167e17adc3f18bc4b14fe3b1635d299bfb3e..89ddab127e33df525924b73f23501fc9fcab1006 100644 (file)
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES   2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
        idle,
        locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 struct floppy_state {
        enum swim_state state;
-       spinlock_t lock;
        struct swim3 __iomem *swim3;    /* hardware registers */
        struct dbdma_regs __iomem *dma; /* DMA controller registers */
        int     swim3_intr;     /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
        int     wanted;
        struct macio_dev *mdev;
        char    dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+       int     index;
+       struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)        dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)        dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
        0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-                       void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-       if (__blk_end_request(fd_req, err, nr_bytes))
-               return true;
+       struct request *req = fs->cur_req;
+       int rc;
 
-       fd_req = NULL;
-       return false;
-}
+       swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+                 err, nr_bytes, req);
 
-static bool swim3_end_request_cur(int err)
-{
-       return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+       if (err)
+               nr_bytes = blk_rq_cur_bytes(req);
+       rc = __blk_end_request(req, err, nr_bytes);
+       if (rc)
+               return true;
+       fs->cur_req = NULL;
+       return false;
 }
 
 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
        return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-       int i;
-
-       for(i=0; i<floppy_count; i++) {
-               struct floppy_state *fs = &floppy_states[i];
-               if (fs->mdev->media_bay &&
-                   check_media_bay(fs->mdev->media_bay) != MB_FD)
-                       continue;
-               start_request(fs);
-       }
-}
-
 static void start_request(struct floppy_state *fs)
 {
        struct request *req;
        unsigned long x;
 
+       swim3_dbg("start request, initial state=%d\n", fs->state);
+
        if (fs->state == idle && fs->wanted) {
                fs->state = available;
                wake_up(&fs->wait);
                return;
        }
        while (fs->state == idle) {
-               if (!fd_req) {
-                       fd_req = blk_fetch_request(swim3_queue);
-                       if (!fd_req)
+               swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+               if (!fs->cur_req) {
+                       fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+                       swim3_dbg("  fetched request %p\n", fs->cur_req);
+                       if (!fs->cur_req)
                                break;
                }
-               req = fd_req;
-#if 0
-               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-                      req->rq_disk->disk_name, req->cmd,
-                      (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-               printk("           errors=%d current_nr_sectors=%u\n",
-                      req->errors, blk_rq_cur_sectors(req));
+               req = fs->cur_req;
+
+               if (fs->mdev->media_bay &&
+                   check_media_bay(fs->mdev->media_bay) != MB_FD) {
+                       swim3_dbg("%s", "  media bay absent, dropping req\n");
+                       swim3_end_request(fs, -ENODEV, 0);
+                       continue;
+               }
+
+#if 0 /* This is really too verbose */
+               swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+                         req->rq_disk->disk_name, req->cmd,
+                         (long)blk_rq_pos(req), blk_rq_sectors(req),
+                         req->buffer);
+               swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+                         req->errors, blk_rq_cur_sectors(req));
 #endif
 
                if (blk_rq_pos(req) >= fs->total_secs) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+                                 (long)blk_rq_pos(req), (long)fs->total_secs);
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
                if (fs->ejected) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("%s", "  disk ejected\n");
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
 
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
                        if (fs->write_prot < 0)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
-                               swim3_end_request_cur(-EIO);
+                               swim3_dbg("%s", "  try to write, disk write protected\n");
+                               swim3_end_request(fs, -EIO, 0);
                                continue;
                        }
                }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                fs->head = x / fs->secpertrack;
                fs->req_sector = x % fs->secpertrack + 1;
-               fd_req = req;
                fs->state = do_transfer;
                fs->retries = 0;
 
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
        }
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+       start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long))
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&fs->lock, flags);
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        fs->timeout.data = (unsigned long) fs;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
-       spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_cmd *cp = fs->dma_cmd;
        struct dbdma_regs __iomem *dr = fs->dma;
+       struct request *req = fs->cur_req;
 
-       if (blk_rq_cur_sectors(fd_req) <= 0) {
-               printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+       if (blk_rq_cur_sectors(req) <= 0) {
+               swim3_warn("%s", "Transfer 0 sectors ?\n");
                return;
        }
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                n = 1;
        else {
                n = fs->secpertrack - fs->req_sector + 1;
-               if (n > blk_rq_cur_sectors(fd_req))
-                       n = blk_rq_cur_sectors(fd_req);
+               if (n > blk_rq_cur_sectors(req))
+                       n = blk_rq_cur_sectors(req);
        }
+
+       swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+                 fs->req_sector, fs->secpertrack, fs->head, n);
+
        fs->scount = n;
        swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
        out_8(&sw->sector, fs->req_sector);
        out_8(&sw->nsect, n);
        out_8(&sw->gap3, 0);
        out_le32(&dr->cmdptr, virt_to_bus(cp));
-       if (rq_data_dir(fd_req) == WRITE) {
+       if (rq_data_dir(req) == WRITE) {
                /* Set up 3 dma commands: write preamble, data, postamble */
                init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
                ++cp;
-               init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+               init_dma(cp, OUTPUT_MORE, req->buffer, 512);
                ++cp;
                init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
        } else {
-               init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+               init_dma(cp, INPUT_LAST, req->buffer, n * 512);
        }
        ++cp;
        out_le16(&cp->command, DBDMA_STOP);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        in_8(&sw->error);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                out_8(&sw->control_bis, WRITE_SECTORS);
        in_8(&sw->intr);
        out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
        for (;;) {
+               swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+                         fs->state, fs->req_cyl, fs->cur_cyl);
+
                switch (fs->state) {
                case idle:
                        return;         /* XXX shouldn't get here */
 
                case locating:
                        if (swim3_readbit(fs, TRACK_ZERO)) {
+                               swim3_dbg("%s", "    locate track 0\n");
                                fs->cur_cyl = 0;
                                if (fs->req_cyl == 0)
                                        fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
                                break;
                        }
                        if (fs->req_cyl == fs->cur_cyl) {
-                               printk("whoops, seeking 0\n");
+                               swim3_warn("%s", "Whoops, seeking 0\n");
                                fs->state = do_transfer;
                                break;
                        }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
                case do_transfer:
                        if (fs->cur_cyl != fs->req_cyl) {
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+                                                 fs->req_cyl, fs->cur_cyl);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
                        return;
 
                default:
-                       printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+                       swim3_err("Unknown state %d\n", fs->state);
                        return;
                }
        }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* scan timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request_cur(-EIO);
+               swim3_end_request(fs, -EIO, 0);
                fs->state = idle;
                start_request(fs);
        } else {
                fs->state = jogging;
                act(fs);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* seek timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_SEEK);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
-       printk(KERN_ERR "swim3: seek timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* settle timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        if (swim3_readbit(fs, SEEK_COMPLETE)) {
                out_8(&sw->select, RELAX);
                fs->state = locating;
                act(fs);
-               return;
+               goto unlock;
        }
        out_8(&sw->select, RELAX);
        if (fs->settle_time < 2*HZ) {
                ++fs->settle_time;
                set_timeout(fs, 1, settle_timeout);
-               return;
+               goto unlock;
        }
-       printk(KERN_ERR "swim3: seek settle timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek settle timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+ unlock:
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_regs __iomem *dr = fs->dma;
+       unsigned long flags;
        int n;
 
+       swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_le32(&dr->control, RUN << 16);
        /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
        out_8(&sw->select, RELAX);
-       printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-              (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-              (long)blk_rq_pos(fd_req));
-       swim3_end_request_cur(-EIO);
+       swim3_err("Timeout %sing sector %ld\n",
+              (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+              (long)blk_rq_pos(fs->cur_req));
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
        int stat, resid;
        struct dbdma_regs __iomem *dr;
        struct dbdma_cmd *cp;
+       unsigned long flags;
+       struct request *req = fs->cur_req;
+
+       swim3_dbg("* interrupt, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        intr = in_8(&sw->intr);
        err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
        if ((intr & ERROR_INTR) && fs->state != do_transfer)
-               printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-                      fs->state, rq_data_dir(fd_req), intr, err);
+               swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+                         fs->state, rq_data_dir(req), intr, err);
        switch (fs->state) {
        case locating:
                if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        del_timer(&fs->timeout);
                        fs->timeout_pending = 0;
                        if (sw->ctrack == 0xff) {
-                               printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+                               swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        fs->cur_cyl = sw->ctrack;
                        fs->cur_sector = sw->csect;
                        if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-                               printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-                                      fs->expect_cyl, fs->cur_cyl);
+                               swim3_err("Expected cyl %d, got %d\n",
+                                         fs->expect_cyl, fs->cur_cyl);
                        fs->state = do_transfer;
                        act(fs);
                }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                fs->timeout_pending = 0;
                dr = fs->dma;
                cp = fs->dma_cmd;
-               if (rq_data_dir(fd_req) == WRITE)
+               if (rq_data_dir(req) == WRITE)
                        ++cp;
                /*
                 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                if (intr & ERROR_INTR) {
                        n = fs->scount - 1 - resid / 512;
                        if (n > 0) {
-                               blk_update_request(fd_req, 0, n << 9);
+                               blk_update_request(req, 0, n << 9);
                                fs->req_sector += n;
                        }
                        if (fs->retries < 5) {
                                ++fs->retries;
                                act(fs);
                        } else {
-                               printk("swim3: error %sing block %ld (err=%x)\n",
-                                      rq_data_dir(fd_req) == WRITE? "writ": "read",
-                                      (long)blk_rq_pos(fd_req), err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("Error %sing block %ld (err=%x)\n",
+                                      rq_data_dir(req) == WRITE? "writ": "read",
+                                      (long)blk_rq_pos(req), err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                        }
                } else {
                        if ((stat & ACTIVE) == 0 || resid != 0) {
                                /* musta been an error */
-                               printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-                               printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-                                      fs->state, rq_data_dir(fd_req), intr, err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+                               swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+                                         fs->state, rq_data_dir(req), intr, err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
                        }
-                       if (swim3_end_request(0, fs->scount << 9)) {
+                       fs->retries = 0;
+                       if (swim3_end_request(fs, 0, fs->scount << 9)) {
                                fs->req_sector += fs->scount;
                                if (fs->req_sector > fs->secpertrack) {
                                        fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        start_request(fs);
                break;
        default:
-               printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+               swim3_err("Don't know what to do in state %d\n", fs->state);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
                      int interruptible)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
-       if (fs->state != idle) {
+       swim3_dbg("%s", "-> grab drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
+       if (fs->state != idle && fs->state != available) {
                ++fs->wanted;
                while (fs->state != available) {
+                       spin_unlock_irqrestore(&swim3_lock, flags);
                        if (interruptible && signal_pending(current)) {
                                --fs->wanted;
-                               spin_unlock_irqrestore(&fs->lock, flags);
                                return -EINTR;
                        }
                        interruptible_sleep_on(&fs->wait);
+                       spin_lock_irqsave(&swim3_lock, flags);
                }
                --fs->wanted;
        }
        fs->state = state;
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
+
        return 0;
 }
 
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
+       swim3_dbg("%s", "-> release drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->state = idle;
        start_request(fs);
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
        struct floppy_state *fs = disk->private_data;
        struct swim3 __iomem *sw = fs->swim3;
+
        mutex_lock(&swim3_mutex);
        if (fs->ref_count > 0 && --fs->ref_count == 0) {
                swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
        .revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+       struct floppy_state *fs = macio_get_drvdata(mdev);
+       struct swim3 __iomem *sw = fs->swim3;
+
+       if (!fs)
+               return;
+       if (mb_state != MB_FD)
+               return;
+
+       /* Clear state */
+       out_8(&sw->intr_enable, 0);
+       in_8(&sw->intr);
+       in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
        struct device_node *swim = mdev->ofdev.dev.of_node;
        struct floppy_state *fs = &floppy_states[index];
        int rc = -EBUSY;
 
+       /* Do this first for message macros */
+       memset(fs, 0, sizeof(*fs));
+       fs->mdev = mdev;
+       fs->index = index;
+
        /* Check & Request resources */
        if (macio_resource_count(mdev) < 2) {
-               printk(KERN_WARNING "ifd%d: no address for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "No address in device-tree\n");
                return -ENXIO;
        }
-       if (macio_irq_count(mdev) < 2) {
-               printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-                       index, swim->full_name);
+       if (macio_irq_count(mdev) < 1) {
+               swim3_err("%s", "No interrupt in device-tree\n");
+               return -ENXIO;
        }
        if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-               printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request mmio resource\n");
                return -EBUSY;
        }
        if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-               printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request dma resource\n");
                macio_release_resource(mdev, 0);
                return -EBUSY;
        }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        if (mdev->media_bay == NULL)
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
        
-       memset(fs, 0, sizeof(*fs));
-       spin_lock_init(&fs->lock);
        fs->state = idle;
        fs->swim3 = (struct swim3 __iomem *)
                ioremap(macio_resource_start(mdev, 0), 0x200);
        if (fs->swim3 == NULL) {
-               printk("fd%d: couldn't map registers for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map mmio registers\n");
                rc = -ENOMEM;
                goto out_release;
        }
        fs->dma = (struct dbdma_regs __iomem *)
                ioremap(macio_resource_start(mdev, 1), 0x200);
        if (fs->dma == NULL) {
-               printk("fd%d: couldn't map DMA for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map dma registers\n");
                iounmap(fs->swim3);
                rc = -ENOMEM;
                goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        fs->secpercyl = 36;
        fs->secpertrack = 18;
        fs->total_secs = 2880;
-       fs->mdev = mdev;
        init_waitqueue_head(&fs->wait);
 
        fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
        memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
        st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+       if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+               swim3_mb_event(mdev, MB_FD);
+
        if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-               printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-                      index, fs->swim3_intr, swim->full_name);
+               swim3_err("%s", "Couldn't request interrupt\n");
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
                goto out_unmap;
                return -EBUSY;
        }
-/*
-       if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-               printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-                      fs->dma_intr);
-               return -EBUSY;
-       }
-*/
 
        init_timer(&fs->timeout);
 
-       printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+       swim3_info("SWIM3 floppy controller %s\n",
                mdev->media_bay ? "in media bay" : "");
 
        return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-       int i, rc;
        struct gendisk *disk;
+       int index, rc;
+
+       index = floppy_count++;
+       if (index >= MAX_FLOPPIES)
+               return -ENXIO;
 
        /* Add the drive */
-       rc = swim3_add_device(mdev, floppy_count);
+       rc = swim3_add_device(mdev, index);
        if (rc)
                return rc;
+       /* Now register that disk. Same comment about failure handling */
+       disk = disks[index] = alloc_disk(1);
+       if (disk == NULL)
+               return -ENOMEM;
+       disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+       if (disk->queue == NULL) {
+               put_disk(disk);
+               return -ENOMEM;
+       }
+       disk->queue->queuedata = &floppy_states[index];
 
-       /* Now create the queue if not there yet */
-       if (swim3_queue == NULL) {
+       if (index == 0) {
                /* If we failed, there isn't much we can do as the driver is still
                 * too dumb to remove the device, just bail out
                 */
                if (register_blkdev(FLOPPY_MAJOR, "fd"))
                        return 0;
-               swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-               if (swim3_queue == NULL) {
-                       unregister_blkdev(FLOPPY_MAJOR, "fd");
-                       return 0;
-               }
        }
 
-       /* Now register that disk. Same comment about failure handling */
-       i = floppy_count++;
-       disk = disks[i] = alloc_disk(1);
-       if (disk == NULL)
-               return 0;
-
        disk->major = FLOPPY_MAJOR;
-       disk->first_minor = i;
+       disk->first_minor = index;
        disk->fops = &floppy_fops;
-       disk->private_data = &floppy_states[i];
-       disk->queue = swim3_queue;
+       disk->private_data = &floppy_states[index];
        disk->flags |= GENHD_FL_REMOVABLE;
-       sprintf(disk->disk_name, "fd%d", i);
+       sprintf(disk->disk_name, "fd%d", index);
        set_capacity(disk, 2880);
        add_disk(disk);
 
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
                .of_match_table = swim3_match,
        },
        .probe          = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+       .mediabay_event = swim3_mb_event,
+#endif
 #if 0
        .suspend        = swim3_suspend,
        .resume         = swim3_resume,
index c2917ffad2c2a311beeeaed7f8171196601d78ff..34767a6d7f42a354edca655d901c298e3883609a 100644 (file)
 #define IPMI_WDOG_SET_TIMER            0x24
 #define IPMI_WDOG_GET_TIMER            0x25
 
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP  0x80
+
 /* These are here until the real ones get into the watchdog.h interface. */
 #ifndef WDIOC_GETTIMEOUT
 #define        WDIOC_GETTIMEOUT        _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
        struct kernel_ipmi_msg            msg;
        int                               rv;
        struct ipmi_system_interface_addr addr;
+       int                               timeout_retries = 0;
 
        if (ipmi_ignore_heartbeat)
                return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
 
        mutex_lock(&heartbeat_lock);
 
+restart:
        atomic_set(&heartbeat_tofree, 2);
 
        /*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
        /* Wait for the heartbeat to be sent. */
        wait_for_completion(&heartbeat_wait);
 
-       if (heartbeat_recv_msg.msg.data[0] != 0) {
+       if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+               timeout_retries++;
+               if (timeout_retries > 3) {
+                       printk(KERN_ERR PFX ": Unable to restore the IPMI"
+                              " watchdog's settings, giving up.\n");
+                       rv = -EIO;
+                       goto out_unlock;
+               }
+
+               /*
+                * The timer was not initialized, that means the BMC was
+                * probably reset and lost the watchdog information.  Attempt
+                * to restore the timer's info.  Note that we still hold
+                * the heartbeat lock, to keep a heartbeat from happening
+                * in this process, so must say no heartbeat to avoid a
+                * deadlock on this mutex.
+                */
+               rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+               if (rv) {
+                       printk(KERN_ERR PFX ": Unable to send the command to"
+                              " set the watchdog's settings, giving up.\n");
+                       goto out_unlock;
+               }
+
+               /* We might need a new heartbeat, so do it now */
+               goto restart;
+       } else if (heartbeat_recv_msg.msg.data[0] != 0) {
                /*
                 * Got an error in the heartbeat response.  It was already
                 * reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
                rv = -EINVAL;
        }
 
+out_unlock:
        mutex_unlock(&heartbeat_lock);
 
        return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
 static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
                                  void                 *handler_data)
 {
-       if (msg->msg.data[0] != 0) {
+       if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+                       msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+               printk(KERN_INFO PFX "response: The IPMI controller appears"
+                      " to have been reset, will attempt to reinitialize"
+                      " the watchdog timer\n");
+       else if (msg->msg.data[0] != 0)
                printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
                       msg->msg.data[0],
                       msg->msg.cmd);
-       }
 
        ipmi_free_recv_msg(msg);
 }
index 63e19ba56bbea5a12ee784f023bd13a758eaf611..6035ab8d5ef7e25e6eae89fbfdc53476f248de8b 100644 (file)
@@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes)
                if (!arch_get_random_long(&v))
                        break;
                
-               memcpy(buf, &v, chunk);
+               memcpy(p, &v, chunk);
                p += chunk;
                nbytes -= chunk;
        }
index 5c6f56f21443ae7e01a1b9fc90b3202674fbaa37..dcd8babae9eb36fe864bae433558e5e1b4c77d83 100644 (file)
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
                else
                        op.config |= CFG_MID_FRAG;
 
-               writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-               writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-               writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-               writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-               writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               if (first_block) {
+                       writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+                       writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+                       writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+                       writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+                       writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               }
        }
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
index 643b055ed3cdc619d07166e8eaa84b2f849252df..8f0491037080df38856b76ec73db77cf18d3320f 100644 (file)
@@ -1,36 +1,29 @@
-config ARCH_HAS_DEVFREQ
-       bool
-       depends on ARCH_HAS_OPP
-       help
-         Denotes that the architecture supports DEVFREQ. If the architecture
-         supports multiple OPP entries per device and the frequency of the
-         devices with OPPs may be altered dynamically, the architecture
-         supports DEVFREQ.
-
 menuconfig PM_DEVFREQ
        bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
-       depends on PM_OPP && ARCH_HAS_DEVFREQ
        help
-         With OPP support, a device may have a list of frequencies and
-         voltages available. DEVFREQ, a generic DVFS framework can be
-         registered for a device with OPP support in order to let the
-         governor provided to DEVFREQ choose an operating frequency
-         based on the OPP's list and the policy given with DEVFREQ.
+         A device may have a list of frequencies and voltages available.
+         devfreq, a generic DVFS framework can be registered for a device
+         in order to let the governor provided to devfreq choose an
+         operating frequency based on the device driver's policy.
 
-         Each device may have its own governor and policy. DEVFREQ can
+         Each device may have its own governor and policy. Devfreq can
          reevaluate the device state periodically and/or based on the
-         OPP list changes (each frequency/voltage pair in OPP may be
-         disabled or enabled).
+         notification to "nb", a notifier block, of devfreq.
 
-         Like some CPUs with CPUFREQ, a device may have multiple clocks.
+         Like some CPUs with CPUfreq, a device may have multiple clocks.
          However, because the clock frequencies of a single device are
-         determined by the single device's state, an instance of DEVFREQ
+         determined by the single device's state, an instance of devfreq
          is attached to a single device and returns a "representative"
-         clock frequency from the OPP of the device, which is also attached
-         to a device by 1-to-1. The device registering DEVFREQ takes the
-         responsiblity to "interpret" the frequency listed in OPP and
+         clock frequency of the device, which is also attached
+         to a device by 1-to-1. The device registering devfreq takes the
+         responsibility to "interpret" the representative frequency and
          to set its every clock accordingly with the "target" callback
-         given to DEVFREQ.
+         given to devfreq.
+
+         When OPP is used with the devfreq device, it is recommended to
+         register devfreq's nb to the OPP's notifier head.  If OPP is
+         used with the devfreq device, you may use OPP helper
+         functions defined in devfreq.h.
 
 if PM_DEVFREQ
 
index 5d15b812377bc9ad23420e6d41bd7ea62dc62e2f..59d24e9cb8c512a949a24803c5bf11a143fc649d 100644 (file)
@@ -15,7 +15,9 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/stat.h>
 #include <linux/opp.h>
 #include <linux/devfreq.h>
 #include <linux/workqueue.h>
@@ -416,10 +418,14 @@ out:
  */
 int devfreq_remove_device(struct devfreq *devfreq)
 {
+       bool central_polling;
+
        if (!devfreq)
                return -EINVAL;
 
-       if (!devfreq->governor->no_central_polling) {
+       central_polling = !devfreq->governor->no_central_polling;
+
+       if (central_polling) {
                mutex_lock(&devfreq_list_lock);
                while (wait_remove_device == devfreq) {
                        mutex_unlock(&devfreq_list_lock);
@@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq)
        mutex_lock(&devfreq->lock);
        _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
 
-       if (!devfreq->governor->no_central_polling)
+       if (central_polling)
                mutex_unlock(&devfreq_list_lock);
 
        return 0;
index ab8f469f5cf8154a244a59759cbadac01279b1d9..5a99bb3f255ae7c34fedc540949ab0163a15d5a5 100644 (file)
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
        bool "MX3x Image Processing Unit support"
-       depends on ARCH_MX3
+       depends on SOC_IMX31 || SOC_IMX35
        select DMA_ENGINE
        default y
        help
@@ -216,7 +216,7 @@ config PCH_DMA
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
-       depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+       depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
        select DMA_ENGINE
        help
          Support the i.MX SDMA engine. This engine is integrated into
index 8af8e864a9cffbc13c91c625a837e11bb8de24fe..73464a62adf74ae1483a16ad71847c03fc03169f 100644 (file)
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
        { .compatible = "fsl,p1020-memory-controller", },
        { .compatible = "fsl,p1021-memory-controller", },
        { .compatible = "fsl,p2020-memory-controller", },
-       { .compatible = "fsl,p4080-memory-controller", },
+       { .compatible = "fsl,qoriq-memory-controller", },
        {},
 };
 MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
index bcb1126e3d00b2357a0c5748edbd8b1b470c0423..153980be4ee64462f12a97563c0143fc59b3eb82 100644 (file)
@@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str)
 }
 
 /**
- *     dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information.
+ *     dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
  *     @str:   Case sensitive Name
  */
 int dmi_name_in_vendors(const char *str)
 {
-       static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR,
-                               DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR,
-                               DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE };
+       static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
        int i;
        for (i = 0; fields[i] != DMI_NONE; i++) {
                int f = fields[i];
index 8370f72d87ff5ed955789973845629f406f6734e..b0a81173a268175f71606e1aad2ab6067b67f698 100644 (file)
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
 }
 
 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
-                              struct timespec *timespec, struct pstore_info *psi)
+                              struct timespec *timespec,
+                              char **buf, struct pstore_info *psi)
 {
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
        struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
                                timespec->tv_nsec = 0;
                                get_var_data_locked(efivars, &efivars->walk_entry->var);
                                size = efivars->walk_entry->var.DataSize;
-                               memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+                               *buf = kmalloc(size, GFP_KERNEL);
+                               if (*buf == NULL)
+                                       return -ENOMEM;
+                               memcpy(*buf, efivars->walk_entry->var.Data,
+                                      size);
                                efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
                                                   struct efivar_entry, list);
                                return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
 }
 
 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
-                              struct timespec *time, struct pstore_info *psi)
+                              struct timespec *timespec,
+                              char **buf, struct pstore_info *psi)
 {
        return -1;
 }
index c811cb107904f64377a5c77f8dad8dbb5abc1bd2..2cce44a1d7d09c4af8ad76bb13a632779542d295 100644 (file)
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
        ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+       char *sign;
+} ibft_signs[] = {
+       /*
+        * One spec says "IBFT", the other says "iBFT". We have to check
+        * for both.
+        */
+       { ACPI_SIG_IBFT },
+       { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+       int i;
+       struct acpi_table_header *table = NULL;
+
+       if (acpi_disabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+               acpi_get_table(ibft_signs[i].sign, 0, &table);
+               ibft_addr = (struct acpi_table_ibft *)table;
+       }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
        int rc = 0;
 
+       /*
+          On UEFI systems, setup_arch()/find_ibft_region() is
+          called before the ACPI tables are parsed, so it only
+          performs the legacy (memory-scan) lookup.
+       */
+       if (!ibft_addr)
+               acpi_find_ibft_region();
+
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)isa_virt_to_bus(ibft_addr));
+               pr_info("iBFT detected.\n");
 
                rc = ibft_check_device();
                if (rc)
index bfe723266fd89bb84726d88cf15c206c020ee707..4da4eb9ae92604c35349ebaeb39b4a612bac3ed6 100644 (file)
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
        char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-       /*
-        * One spec says "IBFT", the other says "iBFT". We have to check
-        * for both.
-        */
-       { ACPI_SIG_IBFT },
-#endif
        { "iBFT" },
        { "BIFT" },     /* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-       ibft_addr = (struct acpi_table_ibft *)header;
-       return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
        unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
                                 * the table cannot be valid. */
                                if (pos + len <= (IBFT_END-1)) {
                                        ibft_addr = (struct acpi_table_ibft *)virt;
+                                       pr_info("iBFT found at 0x%lx.\n", pos);
                                        goto done;
                                }
                        }
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-       int i;
-#endif
        ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-               acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!ibft_addr && !efi_enabled)
+       if (!efi_enabled)
                find_ibft_in_mem();
 
        if (ibft_addr) {
index f10fc521951b17491348f0cd0fbb1f3013e31eae..1eedb6f7fdabe46efa082039818bef6f31fe1591 100644 (file)
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+       size_t payload = 0;
+
+       switch (sa->instr) {
+       case SIGMA_ACTION_WRITEXBYTES:
+       case SIGMA_ACTION_WRITESINGLE:
+       case SIGMA_ACTION_WRITESAFELOAD:
+               payload = sigma_action_len(sa);
+               break;
+       default:
+               break;
+       }
+
+       payload = ALIGN(payload, 2);
+
+       return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-       struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
        size_t len = sigma_action_len(sa);
-       int ret = 0;
+       int ret;
 
        pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
                sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
        case SIGMA_ACTION_WRITEXBYTES:
        case SIGMA_ACTION_WRITESINGLE:
        case SIGMA_ACTION_WRITESAFELOAD:
-               if (ssfw->fw->size < ssfw->pos + len)
-                       return -EINVAL;
                ret = i2c_master_send(client, (void *)&sa->addr, len);
                if (ret < 0)
                        return -EINVAL;
                break;
-
        case SIGMA_ACTION_DELAY:
-               ret = 0;
                udelay(len);
                len = 0;
                break;
-
        case SIGMA_ACTION_END:
-               return 1;
-
+               return 0;
        default:
                return -EINVAL;
        }
 
-       /* when arrive here ret=0 or sent data */
-       ssfw->pos += sigma_action_size(sa, len);
-       return ssfw->pos == ssfw->fw->size;
+       return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-       pr_debug("%s: processing %p\n", __func__, ssfw);
+       struct sigma_action *sa;
+       size_t size;
+       int ret;
+
+       while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+               sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+               size = sigma_action_size(sa);
+               ssfw->pos += size;
+               if (ssfw->pos > ssfw->fw->size || size == 0)
+                       break;
+
+               ret = process_sigma_action(client, sa);
 
-       while (1) {
-               int ret = process_sigma_action(client, ssfw);
                pr_debug("%s: action returned %i\n", __func__, ret);
-               if (ret == 1)
-                       return 0;
-               else if (ret)
+
+               if (ret <= 0)
                        return ret;
        }
+
+       if (ssfw->pos != ssfw->fw->size)
+               return -EINVAL;
+
+       return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
        /* then verify the header */
        ret = -EINVAL;
-       if (fw->size < sizeof(*ssfw_head))
+
+       /*
+        * Reject too small or unreasonably large files. The upper limit has been
+        * chosen a bit arbitrarily, but it should be enough for all practical
+        * purposes and having the limit makes it easier to avoid integer
+        * overflows later in the loading process.
+        */
+       if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
                goto done;
 
        ssfw_head = (void *)fw->data;
        if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
                goto done;
 
-       crc = crc32(0, fw->data, fw->size);
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
-       if (crc != ssfw_head->crc)
+       if (crc != le32_to_cpu(ssfw_head->crc))
                goto done;
 
        ssfw.pos = sizeof(*ssfw_head);
index dbcb0bcfd8dadf49ed156311594e704f81a9b9df..4e018d6a763996127cd370a6a3908273024f37a8 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI)    += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)      += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)      += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)    += gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)     += gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)     += gpio-max730x.o
index 038f5eb8b13d0ce2ff508fb31facd4619f64dcb1..f8ce29ef9f883a047b1e551f29aac5ead956e65a 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT                           1
 #define DA9052_OUTPUT_OPENDRAIN                2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE          0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE          0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT               4
+#define DA9052_IRQ_GPI0                        16
+#define DA9052_GPIO_ODD_SHIFT                  7
+#define DA9052_GPIO_EVEN_SHIFT                 3
 
 struct da9052_gpio {
        struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
        struct da9052_gpio *gpio = to_da9052_gpio(gc);
-       unsigned char register_value = 0;
        int ret;
 
        if (da9052_gpio_port_odd(offset)) {
-               if (value) {
-                       register_value = DA9052_GPIO_ODD_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_ODD_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_ODD_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio odd reg,%d",
                                        ret);
-               }
        } else {
-               if (value) {
-                       register_value = DA9052_GPIO_EVEN_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_EVEN_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_EVEN_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio even reg,%d",
                                        ret);
-               }
        }
 }
 
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
        .direction_input = da9052_gpio_direction_input,
        .direction_output = da9052_gpio_direction_output,
        .to_irq = da9052_gpio_to_irq,
-       .can_sleep = 1;
-       .ngpio = 16;
-       .base = -1;
+       .can_sleep = 1,
+       .ngpio = 16,
+       .base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
index ea8e73869250c24be2fd3e1fc35439fb3271f917..461958fc2264e91321a5bb80f7ba9cbed0a1a5bc 100644 (file)
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
                  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien &= ~(1 << (d->irq - chip->irq_base));
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien |= 1 << (d->irq - chip->irq_base);
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
        struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
        int i, j;
        int ret = IRQ_NONE;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++, chip++) {
                reg_val = ioread32(&chip->reg->regs[i].istatus);
                for (j = 0; j < num_ports[i]; j++) {
                        if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
        ct->chip.irq_mask = ioh_irq_mask;
        ct->chip.irq_unmask = ioh_irq_unmask;
        ct->chip.irq_set_type = ioh_irq_type;
+       ct->chip.irq_disable = ioh_irq_disable;
+       ct->chip.irq_enable = ioh_irq_enable;
 
        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
index ec3fcf0a7e125ce19b3604959717a00e0b981cc0..5cd04b65c55610f1e015f25738f7c9f3b2649df7 100644 (file)
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
        return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       /* GPIO 28..31 are input only on MPC5121 */
+       if (gpio >= 28)
+               return -EINVAL;
+
+       return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
        struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
        mm_gc->save_regs = mpc8xxx_gpio_save_regs;
        gc->ngpio = MPC8XXX_GPIO_PINS;
        gc->direction_input = mpc8xxx_gpio_dir_in;
-       gc->direction_output = mpc8xxx_gpio_dir_out;
-       if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-               gc->get = mpc8572_gpio_get;
-       else
-               gc->get = mpc8xxx_gpio_get;
+       gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+               mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+       gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+               mpc8572_gpio_get : mpc8xxx_gpio_get;
        gc->set = mpc8xxx_gpio_set;
        gc->to_irq = mpc8xxx_gpio_to_irq;
 
index 147df8ae79dbd42a3105a38dd67a067fca7d36f6..d3f3e8f5456129e7036fc3737181d37b40b8c642 100644 (file)
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
  * Translate OpenFirmware node properties into platform_data
  * WARNING: This is DEPRECATED and will be removed eventually!
  */
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
        struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
                *invert = *val;
 }
 #else
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
        *gpio_base = -1;
index 093c90bd3c1d1b1d888ce2465831c95d125e0036..4102f63230fdd2950396e5bcf1013e1f169de42c 100644 (file)
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
        int ret, irq, i;
        static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-       pdata = dev->dev.platform_data;
-       if (pdata == NULL)
-               return -ENODEV;
-
        chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
index 405c63b9d539a74826b0ecfdb6d4000f5a43b2f5..8323fc3898401ac957d9a677b947455f5a598b91 100644 (file)
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        }
 
        if (num_clips && clips_ptr) {
+               if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+                       ret = -EINVAL;
+                       goto out_err1;
+               }
                clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
                if (!clips) {
                        ret = -ENOMEM;
index 3969f7553fe75bf5fa877020e3ccea53f150dfd8..d2619d72ceceb16d7b88a33b09708219483019b4 100644 (file)
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       /* Decouple all encoders and their attached connectors from this crtc */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       if (connector->encoder != encoder)
+                               continue;
+
+                       connector->encoder = NULL;
+               }
+       }
+
+       drm_helper_disable_unused_functions(dev);
+       return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                (int)set->num_connectors, set->x, set->y);
        } else {
                DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-               set->mode = NULL;
-               set->num_connectors = 0;
+               return drm_crtc_helper_disable(set->crtc);
        }
 
        dev = set->crtc->dev;
index 68b756253f9f20f300ce548b9f98a2e3fa5748b2..44a5d0ad8b7c56e99c9ce85f2dfcf14d19e855e0 100644 (file)
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        /* Prevent vblank irq processing while disabling vblank irqs,
         * so no updates of timestamps or count can happen after we've
         * disabled. Needed to prevent races in case of delayed irq's.
-        * Disable preemption, so vblank_time_lock is held as short as
-        * possible, even under a kernel with PREEMPT_RT patches.
         */
-       preempt_disable();
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
        dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        clear_vblank_timestamps(dev, crtc);
 
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-       preempt_enable();
 }
 
 static void vblank_disable_fn(unsigned long arg)
@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        /* Going from 0->1 means we have to enable interrupts again */
        if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-               /* Disable preemption while holding vblank_time_lock. Do
-                * it explicitely to guard against PREEMPT_RT kernel.
-                */
-               preempt_disable();
                spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
                if (!dev->vblank_enabled[crtc]) {
                        /* Enable vblank irqs under vblank_time_lock protection.
@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
                        }
                }
                spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-               preempt_enable();
        } else {
                if (!dev->vblank_enabled[crtc]) {
                        atomic_dec(&dev->vblank_refcount[crtc]);
index 6f8afea94fc979b74476746aca1ae32341846357..2bb07bca511a12b6949892186565cfde960db8ca 100644 (file)
 #include "drm.h"
 
 #include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
 static int lowlevel_buffer_allocate(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
-                       (dma_addr_t *)&entry->paddr, GFP_KERNEL);
-       if (!entry->paddr) {
+       buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+                       &buffer->dma_addr, GFP_KERNEL);
+       if (!buffer->kvaddr) {
                DRM_ERROR("failed to allocate buffer.\n");
                return -ENOMEM;
        }
 
-       DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
-                       (unsigned int)entry->vaddr, entry->paddr, entry->size);
+       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)buffer->kvaddr,
+                       (unsigned long)buffer->dma_addr,
+                       buffer->size);
 
        return 0;
 }
 
 static void lowlevel_buffer_deallocate(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       if (entry->paddr && entry->vaddr && entry->size)
-               dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
-                               entry->paddr);
+       if (buffer->dma_addr && buffer->size)
+               dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+                               (dma_addr_t)buffer->dma_addr);
        else
-               DRM_DEBUG_KMS("entry data is null.\n");
+               DRM_DEBUG_KMS("buffer data are invalid.\n");
 }
 
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size)
 {
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s.\n", __FILE__);
+       DRM_DEBUG_KMS("desired size = 0x%x\n", size);
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
-               DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+       buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+       if (!buffer) {
+               DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       entry->size = size;
+       buffer->size = size;
 
        /*
         * allocate memory region with size and set the memory information
-        * to vaddr and paddr of a entry object.
+        * to vaddr and dma_addr of a buffer object.
         */
-       if (lowlevel_buffer_allocate(dev, entry) < 0) {
-               kfree(entry);
-               entry = NULL;
+       if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+               kfree(buffer);
+               buffer = NULL;
                return ERR_PTR(-ENOMEM);
        }
 
-       return entry;
+       return buffer;
 }
 
 void exynos_drm_buf_destroy(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       if (!entry) {
-               DRM_DEBUG_KMS("entry is null.\n");
+       if (!buffer) {
+               DRM_DEBUG_KMS("buffer is null.\n");
                return;
        }
 
-       lowlevel_buffer_deallocate(dev, entry);
+       lowlevel_buffer_deallocate(dev, buffer);
 
-       kfree(entry);
-       entry = NULL;
+       kfree(buffer);
+       buffer = NULL;
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
index 045d59eab01a29b5270410d661968d408dcfa663..6e91f9caa5dbdfe12aade5461c3ad253888f20bf 100644 (file)
 #ifndef _EXYNOS_DRM_BUF_H_
 #define _EXYNOS_DRM_BUF_H_
 
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
-       dma_addr_t paddr;
-       void __iomem *vaddr;
-       unsigned int size;
-};
-
 /* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size);
 
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
 
 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry);
+               struct exynos_drm_gem_buf *buffer);
 
 #endif
index 985d9e7687287d915c6a76e2b7219f9e7840e7f8..d620b0784257f05cab7c52b121a460c99f6f7daf 100644 (file)
@@ -37,6 +37,8 @@
 
 struct exynos_drm_connector {
        struct drm_connector    drm_connector;
+       uint32_t                encoder_id;
+       struct exynos_drm_manager *manager;
 };
 
 /* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        mode->clock = timing->pixclock / 1000;
+       mode->vrefresh = timing->refresh;
 
        mode->hdisplay = timing->xres;
        mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
        mode->vsync_start = mode->vdisplay + timing->upper_margin;
        mode->vsync_end = mode->vsync_start + timing->vsync_len;
        mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+       if (timing->vmode & FB_VMODE_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (timing->vmode & FB_VMODE_DOUBLE)
+               mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 }
 
 /* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
        memset(timing, 0, sizeof(*timing));
 
        timing->pixclock = mode->clock * 1000;
-       timing->refresh = mode->vrefresh;
+       timing->refresh = drm_mode_vrefresh(mode);
 
        timing->xres = mode->hdisplay;
        timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
 
 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops = manager->display_ops;
        unsigned int count;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (!display) {
-               DRM_DEBUG_KMS("display is null.\n");
+       if (!display_ops) {
+               DRM_DEBUG_KMS("display_ops is null.\n");
                return 0;
        }
 
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
         * P.S. in case of lcd panel, count is always 1 if success
         * because lcd panel has only one mode.
         */
-       if (display->get_edid) {
+       if (display_ops->get_edid) {
                int ret;
                void *edid;
 
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                        return 0;
                }
 
-               ret = display->get_edid(manager->dev, connector,
+               ret = display_ops->get_edid(manager->dev, connector,
                                                edid, MAX_EDID);
                if (ret < 0) {
                        DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
                struct fb_videomode *timing;
 
-               if (display->get_timing)
-                       timing = display->get_timing(manager->dev);
+               if (display_ops->get_timing)
+                       timing = display_ops->get_timing(manager->dev);
                else {
                        drm_mode_destroy(connector->dev, mode);
                        return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
                                            struct drm_display_mode *mode)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops = manager->display_ops;
        struct fb_videomode timing;
        int ret = MODE_BAD;
 
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 
        convert_to_video_timing(&timing, mode);
 
-       if (display && display->check_timing)
-               if (!display->check_timing(manager->dev, (void *)&timing))
+       if (display_ops && display_ops->check_timing)
+               if (!display_ops->check_timing(manager->dev, (void *)&timing))
                        ret = MODE_OK;
 
        return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 
 struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
 {
+       struct drm_device *dev = connector->dev;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       return connector->encoder;
+       obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+                               exynos_connector->encoder_id);
+               return NULL;
+       }
+
+       encoder = obj_to_encoder(obj);
+
+       return encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
 static enum drm_connector_status
 exynos_drm_connector_detect(struct drm_connector *connector, bool force)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops =
+                                       manager->display_ops;
        enum drm_connector_status status = connector_status_disconnected;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (display && display->is_connected) {
-               if (display->is_connected(manager->dev))
+       if (display_ops && display_ops->is_connected) {
+               if (display_ops->is_connected(manager->dev))
                        status = connector_status_connected;
                else
                        status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 
        connector = &exynos_connector->drm_connector;
 
-       switch (manager->display->type) {
+       switch (manager->display_ops->type) {
        case EXYNOS_DISPLAY_TYPE_HDMI:
                type = DRM_MODE_CONNECTOR_HDMIA;
+               connector->interlace_allowed = true;
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
                break;
        default:
                type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
        if (err)
                goto err_connector;
 
+       exynos_connector->encoder_id = encoder->base.id;
+       exynos_connector->manager = manager;
        connector->encoder = encoder;
+
        err = drm_mode_connector_attach_encoder(connector, encoder);
        if (err) {
                DRM_ERROR("failed to attach a connector to a encoder\n");
index 9337e5e2dbb6204428178c8c3a962ebb3d61fab0..ee43cc22085304f7f07f267cd63a7d2be05d249a 100644 (file)
 #include "drmP.h"
 #include "drm_crtc_helper.h"
 
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
 #define to_exynos_crtc(x)      container_of(x, struct exynos_drm_crtc,\
                                drm_crtc)
 
-/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- *     - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- *     - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
-       unsigned int fb_x;
-       unsigned int fb_y;
-       unsigned int crtc_x;
-       unsigned int crtc_y;
-       unsigned int crtc_w;
-       unsigned int crtc_h;
-};
-
 /*
  * Exynos specific crtc structure.
  *
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
 
        exynos_drm_fn_encoder(crtc, overlay,
                        exynos_drm_encoder_crtc_mode_set);
-       exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+       exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                       exynos_drm_encoder_crtc_commit);
 }
 
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
-                                      struct drm_framebuffer *fb,
-                                      struct drm_display_mode *mode,
-                                      struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+                             struct drm_framebuffer *fb,
+                             struct drm_display_mode *mode,
+                             struct exynos_drm_crtc_pos *pos)
 {
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
        unsigned int actual_w;
        unsigned int actual_h;
 
-       entry = exynos_drm_fb_get_buf(fb);
-       if (!entry) {
-               DRM_LOG_KMS("entry is null.\n");
+       buffer = exynos_drm_fb_get_buf(fb);
+       if (!buffer) {
+               DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
-       overlay->paddr = entry->paddr;
-       overlay->vaddr = entry->vaddr;
+       overlay->dma_addr = buffer->dma_addr;
+       overlay->vaddr = buffer->kvaddr;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
                        (unsigned long)overlay->vaddr,
-                       (unsigned long)overlay->paddr);
+                       (unsigned long)overlay->dma_addr);
 
        actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
        actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
-       DRM_DEBUG_KMS("%s\n", __FILE__);
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-       /* TODO */
+       DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                               exynos_drm_encoder_crtc_commit);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               /* TODO */
+               exynos_drm_fn_encoder(crtc, NULL,
+                               exynos_drm_encoder_crtc_disable);
+               break;
+       default:
+               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               break;
+       }
 }
 
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 {
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* drm framework doesn't check NULL. */
+       exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                       exynos_drm_encoder_crtc_commit);
 }
 
 static bool
index c584042d6d2cb21c8282975f9edf28898b4e56e0..25f72a62cb880b757b54e0514693eedd624ce27b 100644 (file)
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
 
+/*
+ * Exynos specific crtc postion structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displyed
+ *     - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ *     - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+       unsigned int fb_x;
+       unsigned int fb_y;
+       unsigned int crtc_x;
+       unsigned int crtc_y;
+       unsigned int crtc_w;
+       unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+                             struct drm_framebuffer *fb,
+                             struct drm_display_mode *mode,
+                             struct exynos_drm_crtc_pos *pos);
 #endif
index 83810cbe3c1770aeea62814dfdd98c2e1b33df2e..53e2216de61dd5c42c35eaa7f0c23ca1e991a621 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_crtc_helper.h"
 
 #include <drm/exynos_drm.h>
 
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
 
+       /* init kms poll for handling hpd */
+       drm_kms_helper_poll_init(dev);
+
        exynos_drm_mode_config_init(dev);
 
        /*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
        exynos_drm_fbdev_fini(dev);
        exynos_drm_device_unregister(dev);
        drm_vblank_cleanup(dev);
+       drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        kfree(dev->dev_private);
 
index c03683f2ae72dffb084aaede5dba582a68c835f9..5e02e6ecc2e026955c9bf88bd5461bf893c1dbec 100644 (file)
@@ -29,6 +29,7 @@
 #ifndef _EXYNOS_DRM_DRV_H_
 #define _EXYNOS_DRM_DRV_H_
 
+#include <linux/module.h>
 #include "drm.h"
 
 #define MAX_CRTC       2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
  * @scan_flag: interlace or progressive way.
  *     (it could be DRM_MODE_FLAG_*)
  * @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- *             and this is physically continuous.
+ * @dma_addr: bus(accessed by dma) address to the memory region allocated
+ *     for a overlay.
  * @vaddr: virtual memory addresss to this overlay.
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
        unsigned int scan_flag;
        unsigned int bpp;
        unsigned int pitch;
-       dma_addr_t paddr;
+       dma_addr_t dma_addr;
        void __iomem *vaddr;
 
        bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
  * @check_timing: check if timing is valid or not.
  * @power_on: display device on or off.
  */
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
        enum exynos_drm_output_type type;
        bool (*is_connected)(struct device *dev);
        int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
  * @mode_set: convert drm_display_mode to hw specific display mode and
  *           would be called by encoder->mode_set().
  * @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  */
 struct exynos_drm_manager_ops {
        void (*mode_set)(struct device *subdrv_dev, void *mode);
        void (*commit)(struct device *subdrv_dev);
+       void (*disable)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
 };
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
        int pipe;
        struct exynos_drm_manager_ops *ops;
        struct exynos_drm_overlay_ops *overlay_ops;
-       struct exynos_drm_display *display;
+       struct exynos_drm_display_ops *display_ops;
 };
 
 /*
index 7cf6fa86a67efb57e10407213639e8084d2c2138..153061415bafba2d3c83d155a7e86a4901db0b52 100644 (file)
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+       struct exynos_drm_manager_ops *manager_ops = manager->ops;
 
        DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
 
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (manager_ops && manager_ops->commit)
+                       manager_ops->commit(manager->dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               /* TODO */
+               if (manager_ops && manager_ops->disable)
+                       manager_ops->disable(manager->dev);
+               break;
+       default:
+               DRM_ERROR("unspecified mode %d\n", mode);
+               break;
+       }
+
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
-                       struct exynos_drm_display *display = manager->display;
+                       struct exynos_drm_display_ops *display_ops =
+                                                       manager->display_ops;
 
-                       if (display && display->power_on)
-                               display->power_on(manager->dev, mode);
+                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+                                       connector->base.id, mode);
+                       if (display_ops && display_ops->power_on)
+                               display_ops->power_on(manager->dev, mode);
                }
        }
 }
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 {
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
        struct exynos_drm_manager_ops *manager_ops = manager->ops;
-       struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        if (manager_ops && manager_ops->commit)
                manager_ops->commit(manager->dev);
-
-       if (overlay_ops && overlay_ops->commit)
-               overlay_ops->commit(manager->dev);
 }
 
 static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
+       struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_manager *manager;
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               if (encoder->crtc != crtc)
-                       continue;
+               /*
+                * if crtc is detached from encoder, check pipe,
+                * otherwise check crtc attached to encoder
+                */
+               if (!encoder->crtc) {
+                       manager = to_exynos_encoder(encoder)->manager;
+                       if (manager->pipe < 0 ||
+                                       private->crtc[manager->pipe] != crtc)
+                               continue;
+               } else {
+                       if (encoder->crtc != crtc)
+                               continue;
+               }
 
                fn(encoder, data);
        }
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int crtc = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /*
+        * when crtc is detached from encoder, this pipe is used
+        * to select manager operation
+        */
+       manager->pipe = crtc;
 
-       overlay_ops->commit(manager->dev);
+       if (overlay_ops && overlay_ops->commit)
+               overlay_ops->commit(manager->dev);
 }
 
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
        struct exynos_drm_overlay *overlay = data;
 
-       overlay_ops->mode_set(manager->dev, overlay);
+       if (overlay_ops && overlay_ops->mode_set)
+               overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_manager *manager =
+               to_exynos_encoder(encoder)->manager;
+       struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (overlay_ops && overlay_ops->disable)
+               overlay_ops->disable(manager->dev);
+
+       /*
+        * crtc is already detached from encoder and last
+        * function for detaching is properly done, so
+        * clear pipe from manager to prevent repeated call
+        */
+       if (!encoder->crtc)
+               manager->pipe = -1;
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
index 5ecd645d06a97e1959599ecfd94d2b7ba2d50847..a22acfbf0e4ed6dc31894d03f7a5369158ded8a2 100644 (file)
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
 
 #endif
index 48d29cfd5240197f645c6732a9360bb255656f97..5bf4a1ac7f828cd26509857e5ed9b024f094f831 100644 (file)
@@ -29,7 +29,9 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
+#include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
  *
  * @fb: drm framebuffer obejct.
  * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- *     - containing only the information to physically continuous memory
- *     region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buffer object.
+ *     - contain the memory information to memory region allocated
+ *     at default framebuffer creation.
  */
 struct exynos_drm_fb {
        struct drm_framebuffer          fb;
        struct exynos_drm_gem_obj       *exynos_gem_obj;
-       struct exynos_drm_buf_entry     *entry;
+       struct exynos_drm_gem_buf       *buffer;
 };
 
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
         * default framebuffer has no gem object so
         * a buffer of the default framebuffer should be released at here.
         */
-       if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
-               exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+       if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+               exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
 
        kfree(exynos_fb);
        exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
         */
        if (!mode_cmd->handle) {
                if (!file_priv) {
-                       struct exynos_drm_buf_entry *entry;
+                       struct exynos_drm_gem_buf *buffer;
 
                        /*
                         * in case that file_priv is NULL, it allocates
                         * only buffer and this buffer would be used
                         * for default framebuffer.
                         */
-                       entry = exynos_drm_buf_create(dev, size);
-                       if (IS_ERR(entry)) {
-                               ret = PTR_ERR(entry);
+                       buffer = exynos_drm_buf_create(dev, size);
+                       if (IS_ERR(buffer)) {
+                               ret = PTR_ERR(buffer);
                                goto err_buffer;
                        }
 
-                       exynos_fb->entry = entry;
+                       exynos_fb->buffer = buffer;
 
-                       DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
-                                       (unsigned long)entry->paddr, size);
+                       DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+                                       (unsigned long)buffer->dma_addr, size);
 
                        goto out;
                } else {
-                       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
-                                                       size,
-                                                       &mode_cmd->handle);
+                       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+                                                       &mode_cmd->handle,
+                                                       size);
                        if (IS_ERR(exynos_gem_obj)) {
                                ret = PTR_ERR(exynos_gem_obj);
                                goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
         * so that default framebuffer has no its own gem object,
         * only its own buffer object.
         */
-       exynos_fb->entry = exynos_gem_obj->entry;
+       exynos_fb->buffer = exynos_gem_obj->buffer;
 
-       DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-                       (unsigned long)exynos_fb->entry->paddr, size,
+       DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+                       (unsigned long)exynos_fb->buffer->dma_addr, size,
                        (unsigned int)&exynos_gem_obj->base);
 
 out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
        return exynos_drm_fb_init(file_priv, dev, mode_cmd);
 }
 
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       entry = exynos_fb->entry;
-       if (!entry)
+       buffer = exynos_fb->buffer;
+       if (!buffer)
                return NULL;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
-                       (unsigned long)entry->vaddr,
-                       (unsigned long)entry->paddr);
+       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+                       (unsigned long)buffer->kvaddr,
+                       (unsigned long)buffer->dma_addr);
 
-       return entry;
+       return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = private->fb_helper;
+
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
 }
 
 static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_drm_fb_create,
+       .output_poll_changed = exynos_drm_output_poll_changed,
 };
 
 void exynos_drm_mode_config_init(struct drm_device *dev)
index 1f4b3d1a77134d46ae193446a2720020dfd745cf..836f4100818710e830229b985591bfc85a047f03 100644 (file)
@@ -33,6 +33,7 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
 #define MAX_CONNECTOR          4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-                                    struct drm_framebuffer *fb,
-                                    unsigned int fb_width,
-                                    unsigned int fb_height)
+                                    struct drm_framebuffer *fb)
 {
        struct fb_info *fbi = helper->fbdev;
        struct drm_device *dev = helper->dev;
        struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
-       struct exynos_drm_buf_entry *entry;
-       unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+       struct exynos_drm_gem_buf *buffer;
+       unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        exynos_fb->fb = fb;
 
        drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
-       drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+       drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
 
-       entry = exynos_drm_fb_get_buf(fb);
-       if (!entry) {
-               DRM_LOG_KMS("entry is null.\n");
+       buffer = exynos_drm_fb_get_buf(fb);
+       if (!buffer) {
+               DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
        offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
        offset += fbi->var.yoffset * fb->pitch;
 
-       dev->mode_config.fb_base = entry->paddr;
-       fbi->screen_base = entry->vaddr + offset;
-       fbi->fix.smem_start = entry->paddr + offset;
+       dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+       fbi->screen_base = buffer->kvaddr + offset;
+       fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
-                       sizes->fb_height);
+       ret = exynos_drm_fbdev_update(helper, helper->fb);
        if (ret < 0)
                fb_dealloc_cmap(&fbi->cmap);
 
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
        }
 
        helper->fb = exynos_fbdev->fb;
-       return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
-                       sizes->fb_height);
+       return exynos_drm_fbdev_update(helper, helper->fb);
 }
 
 static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
        fb_helper = private->fb_helper;
 
        if (fb_helper) {
+               struct list_head temp_list;
+
+               INIT_LIST_HEAD(&temp_list);
+
+               /*
+                * fb_helper is reintialized but kernel fb is reused
+                * so kernel_fb_list need to be backuped and restored
+                */
+               if (!list_empty(&fb_helper->kernel_fb_list))
+                       list_replace_init(&fb_helper->kernel_fb_list,
+                                       &temp_list);
+
                drm_fb_helper_fini(fb_helper);
 
                ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
                        return ret;
                }
 
+               if (!list_empty(&temp_list))
+                       list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
                ret = drm_fb_helper_single_add_all_connectors(fb_helper);
                if (ret < 0) {
                        DRM_ERROR("failed to add fb helper to connectors\n");
index 4659c88cdd9bbaec81e6d6d1d9fe1d7b03964dec..db3b3d9e731d86475d734884e2ec113a8be5f914 100644 (file)
@@ -64,7 +64,7 @@ struct fimd_win_data {
        unsigned int            fb_width;
        unsigned int            fb_height;
        unsigned int            bpp;
-       dma_addr_t              paddr;
+       dma_addr_t              dma_addr;
        void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
        return 0;
 }
 
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
        .type = EXYNOS_DISPLAY_TYPE_LCD,
        .is_connected = fimd_display_is_connected,
        .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
        writel(val, ctx->regs + VIDCON0);
 }
 
+static void fimd_disable(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+       struct drm_device *drm_dev = subdrv->drm_dev;
+       struct exynos_drm_manager *manager = &subdrv->manager;
+       u32 val;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /* fimd dma off */
+       val = readl(ctx->regs + VIDCON0);
+       val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+       writel(val, ctx->regs + VIDCON0);
+
+       /*
+        * if vblank is enabled status with dma off then
+        * it disables vsync interrupt.
+        */
+       if (drm_dev->vblank_enabled[manager->pipe] &&
+               atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+               drm_vblank_put(drm_dev, manager->pipe);
+
+               /*
+                * if vblank_disable_allowed is 0 then disable
+                * vsync interrupt right now else the vsync interrupt
+                * would be disabled by drm timer once a current process
+                * gives up ownershop of vblank event.
+                */
+               if (!drm_dev->vblank_disable_allowed)
+                       drm_vblank_off(drm_dev, manager->pipe);
+       }
+}
+
 static int fimd_enable_vblank(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
        .commit = fimd_commit,
+       .disable = fimd_disable,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
 };
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->ovl_height = overlay->crtc_height;
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
-       win_data->paddr = overlay->paddr + offset;
+       win_data->dma_addr = overlay->dma_addr + offset;
        win_data->vaddr = overlay->vaddr + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
        DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->paddr,
+                       (unsigned long)win_data->dma_addr,
                        (unsigned long)win_data->vaddr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
        writel(val, ctx->regs + SHADOWCON);
 
        /* buffer start address */
-       val = win_data->paddr;
+       val = (unsigned long)win_data->dma_addr;
        writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
        /* buffer end address */
        size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
-       val = win_data->paddr + size;
+       val = (unsigned long)(win_data->dma_addr + size);
        writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
        DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
-                       (unsigned long)win_data->paddr, val, size);
+                       (unsigned long)win_data->dma_addr, val, size);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
 
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
 static void fimd_win_disable(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
-       struct fimd_win_data *win_data;
        int win = ctx->default_win;
        u32 val;
 
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
        if (win < 0 || win > WINDOWS_NR)
                return;
 
-       win_data = &ctx->win_data[win];
-
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
                /* VSYNC interrupt */
                writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
 
+       /*
+        * in case that vblank_disable_allowed is 1, it could induce
+        * the problem that manager->pipe could be -1 because with
+        * disable callback, vsync interrupt isn't disabled and at this moment,
+        * vsync interrupt could occur. the vsync interrupt would be disabled
+        * by timer handler later.
+        */
+       if (manager->pipe == -1)
+               return IRQ_HANDLED;
+
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->irq_enabled = 1;
 
-       /*
-        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
-        * by drm timer once a current process gives up ownership of
-        * vblank event.(drm_vblank_put function was called)
-        */
-       drm_dev->vblank_disable_allowed = 1;
-
        return 0;
 }
 
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        subdrv->manager.pipe = -1;
        subdrv->manager.ops = &fimd_manager_ops;
        subdrv->manager.overlay_ops = &fimd_overlay_ops;
-       subdrv->manager.display = &fimd_display;
+       subdrv->manager.display_ops = &fimd_display_ops;
        subdrv->manager.dev = dev;
 
        platform_set_drvdata(pdev, ctx);
index a8e7a88906ed22e43fb23d23a859dc825a335451..aba0fe47f7eaae47c10ac4b0839eba8c1edf7f54 100644 (file)
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
        return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
 }
 
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int size,
-               unsigned int *handle)
+static struct exynos_drm_gem_obj
+               *exynos_drm_gem_init(struct drm_device *drm_dev,
+                       struct drm_file *file_priv, unsigned int *handle,
+                       unsigned int size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
-       struct exynos_drm_buf_entry *entry;
        struct drm_gem_object *obj;
        int ret;
 
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       size = roundup(size, PAGE_SIZE);
-
        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object.\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       /* allocate the new buffer object and memory region. */
-       entry = exynos_drm_buf_create(dev, size);
-       if (!entry) {
-               kfree(exynos_gem_obj);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       exynos_gem_obj->entry = entry;
-
        obj = &exynos_gem_obj->base;
 
-       ret = drm_gem_object_init(dev, obj, size);
+       ret = drm_gem_object_init(drm_dev, obj, size);
        if (ret < 0) {
-               DRM_ERROR("failed to initailize gem object.\n");
-               goto err_obj_init;
+               DRM_ERROR("failed to initialize gem object.\n");
+               ret = -EINVAL;
+               goto err_object_init;
        }
 
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
 err_create_mmap_offset:
        drm_gem_object_release(obj);
 
-err_obj_init:
-       exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
        kfree(exynos_gem_obj);
 
        return ERR_PTR(ret);
 }
 
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+                               struct drm_file *file_priv,
+                               unsigned int *handle, unsigned long size)
+{
+
+       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+       struct exynos_drm_gem_buf *buffer;
+
+       size = roundup(size, PAGE_SIZE);
+
+       DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+       buffer = exynos_drm_buf_create(dev, size);
+       if (IS_ERR(buffer)) {
+               return ERR_CAST(buffer);
+       }
+
+       exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+       if (IS_ERR(exynos_gem_obj)) {
+               exynos_drm_buf_destroy(dev, buffer);
+               return exynos_gem_obj;
+       }
+
+       exynos_gem_obj->buffer = buffer;
+
+       return exynos_gem_obj;
+}
+
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                                       struct drm_file *file_priv)
 {
        struct drm_exynos_gem_create *args = data;
-       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
 
-       DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+       DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
-                       &args->handle);
+       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+                                               &args->handle, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
        vm_size = vma->vm_end - vma->vm_start;
        /*
-        * a entry contains information to physically continuous memory
+        * a buffer contains information to physically continuous memory
         * allocated by user request or at framebuffer creation.
         */
-       entry = exynos_gem_obj->entry;
+       buffer = exynos_gem_obj->buffer;
 
        /* check if user-requested size is valid. */
-       if (vm_size > entry->size)
+       if (vm_size > buffer->size)
                return -EINVAL;
 
        /*
         * get page frame number to physical memory to be mapped
         * to user space.
         */
-       pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+       pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
 
        DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
 
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
 
        exynos_gem_obj = to_exynos_gem_obj(gem_obj);
 
-       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
 
        kfree(exynos_gem_obj);
 }
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * args->bpp >> 3;
        args->size = args->pitch * args->height;
 
-       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
-                                                       &args->handle);
+       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+                                                       args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        mutex_lock(&dev->struct_mutex);
 
-       pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+       pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+                       PAGE_SHIFT) + page_offset;
 
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
index e5fc0148277bf84b5fe8c9a31365256c940da802..ef8797334e6da746c54eed761521aacdc7afd7a8 100644 (file)
 #define to_exynos_gem_obj(x)   container_of(x,\
                        struct exynos_drm_gem_obj, base)
 
+/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @dma_addr: bus address(accessed by dma) to allocated memory region.
+ *     - this address could be physical address without IOMMU and
+ *     device address with IOMMU.
+ * @size: size of allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+       void __iomem            *kvaddr;
+       dma_addr_t              dma_addr;
+       unsigned long           size;
+};
+
 /*
  * exynos drm buffer structure.
  *
  * @base: a gem object.
  *     - a new handle to this gem object would be created
  *     by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- *     - containing the information to physically
+ * @buffer: a pointer to exynos_drm_gem_buffer object.
+ *     - contain the information to memory region allocated
+ *     by user request or at framebuffer creation.
  *     continuous memory region allocated by user request
  *     or at framebuffer creation.
  *
  */
 struct exynos_drm_gem_obj {
        struct drm_gem_object base;
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 };
 
 /* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int size,
-               unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+               struct drm_file *file_priv,
+               unsigned int *handle, unsigned long size);
 
 /*
  * request gem object creation and buffer allocation as the size
index 4f40f1ce1d8effd5ff1c5502cfcefcaaf49af751..004b048c5192979ce0e180d14824220eb271c9ab 100644 (file)
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        const struct intel_device_info *info = INTEL_INFO(dev);
 
        seq_printf(m, "gen: %d\n", info->gen);
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
@@ -636,11 +637,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
+       int ret;
 
        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        if (ring->size == 0)
                return 0;
 
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
        seq_printf(m, "Ring %s:\n", ring->name);
        seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
        seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +660,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
        seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
        seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -842,7 +850,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u16 crstanddelay = I915_READ16(CRSTANDVID);
+       u16 crstanddelay;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       crstanddelay = I915_READ16(CRSTANDVID);
+
+       mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
 
@@ -940,7 +957,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
-       int i;
+       int ret, i;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +969,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -962,13 +985,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
-       int i;
+       int ret, i;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -977,9 +1006,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 rgvmodectl = I915_READ(MEMMODECTL);
-       u32 rstdbyctl = I915_READ(RSTDBYCTL);
-       u16 crstandvid = I915_READ16(CRSTANDVID);
+       u32 rgvmodectl, rstdbyctl;
+       u16 crstandvid;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       rgvmodectl = I915_READ(MEMMODECTL);
+       rstdbyctl = I915_READ(RSTDBYCTL);
+       crstandvid = I915_READ16(CRSTANDVID);
+
+       mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
@@ -1167,9 +1206,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
index a9533c54c93c766a73acc453d38377a9e0ef5a64..a9ae374861e788fe7ced3b6cf8f1f129c6e84c86 100644 (file)
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
        diff1 = now - dev_priv->last_time1;
 
+       /* Prevent division-by-zero if we are asking too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->chipset_power;
+
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
 
+       dev_priv->chipset_power = ret;
+
        return ret;
 }
 
index e9c2cfe45daae06b71fcae28836d3ae6dbb0497f..a1103fc6597dde91958658d4d6faaabaec38e803 100644 (file)
@@ -58,17 +58,17 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-               "Use semaphores for inter-ring sync (default: false)");
+               "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-               "Enable power-saving render C-state 6 (default: true)");
+               "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
 
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
                "Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
                "Use panel (LVDS/eDP) downclocking for power savings "
                "(default: false)");
 
-unsigned int i915_panel_use_ssc __read_mostly = -1;
+int i915_panel_use_ssc __read_mostly = -1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 MODULE_PARM_DESC(lvds_use_ssc,
                "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {           \
-       .class = PCI_CLASS_DISPLAY_VGA << 8,    \
+       .class = PCI_BASE_CLASS_DISPLAY << 16,  \
        .class_mask = 0xff0000,                 \
        .vendor = 0x8086,                       \
        .device = id,                           \
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
        }
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+               udelay(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+       POSTING_READ(FORCEWAKE_MT);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+               udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
        /* Forcewake is atomic in case we get in here without the lock */
        if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-               __gen6_gt_force_wake_get(dev_priv);
+               dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+       POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        if (atomic_dec_and_test(&dev_priv->forcewake_count))
-               __gen6_gt_force_wake_put(dev_priv);
+               dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
index 06a37f4fd74b17ad82a0ba912da22d9df63e0dba..554bef7a3b9c8db39c5963fcc72ba1d411f31125 100644 (file)
@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
        struct opregion_header *header;
@@ -126,6 +127,9 @@ struct drm_i915_master_private {
        struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
 
 struct drm_i915_fence_reg {
        struct list_head lru_list;
@@ -168,7 +172,7 @@ struct drm_i915_error_state {
        u32 instdone1;
        u32 seqno;
        u64 bbaddr;
-       u64 fence[16];
+       u64 fence[I915_MAX_NUM_FENCES];
        struct timeval time;
        struct drm_i915_error_object {
                int page_count;
@@ -182,7 +186,7 @@ struct drm_i915_error_state {
                u32 gtt_offset;
                u32 read_domains;
                u32 write_domain;
-               s32 fence_reg:5;
+               s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
                s32 pinned:2;
                u32 tiling:2;
                u32 dirty:1;
@@ -218,6 +222,8 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -375,7 +381,7 @@ typedef struct drm_i915_private {
        struct notifier_block lid_notifier;
 
        int crt_ddc_pin;
-       struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+       struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
@@ -506,7 +512,7 @@ typedef struct drm_i915_private {
        u8 saveAR[21];
        u8 saveDACMASK;
        u8 saveCR[37];
-       uint64_t saveFENCE[16];
+       uint64_t saveFENCE[I915_MAX_NUM_FENCES];
        u32 saveCURACNTR;
        u32 saveCURAPOS;
        u32 saveCURABASE;
@@ -707,6 +713,7 @@ typedef struct drm_i915_private {
 
        u64 last_count1;
        unsigned long last_time1;
+       unsigned long chipset_power;
        u64 last_count2;
        struct timespec last_time2;
        unsigned long gfx_power;
@@ -777,10 +784,8 @@ struct drm_i915_gem_object {
         * Fence register bits (if any) for this object.  Will be set
         * as needed when mapped into the GTT.
         * Protected by dev->struct_mutex.
-        *
-        * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
         */
-       signed int fence_reg:5;
+       signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
 
        /**
         * Advice: are the backing pages purgeable?
@@ -997,12 +1002,12 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
+extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -1307,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1351,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
index d18b07adcffa3779fb8f6ba9090e5c2f9c279d66..8359dc777041be9265e53907025564196b1d0fbc 100644 (file)
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       for (i = 0; i < 16; i++) {
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
                struct drm_i915_gem_object *obj = reg->obj;
 
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                         * so emit a request to do so.
                         */
                        request = kzalloc(sizeof(*request), GFP_KERNEL);
-                       if (request)
+                       if (request) {
                                ret = i915_add_request(obj->ring, NULL, request);
-                       else
+                               if (ret)
+                                       kfree(request);
+                       } else
                                ret = -ENOMEM;
                }
 
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
                /* On Gen6, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached.  Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
-       for (i = 0; i < 16; i++)
+       for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
index 3693e83a97f325f95276b4e9c009ac48569174ed..b9da8900ae4eaef10c12f2d68da79a226f9f3260 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
        uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               return 0;
+
+       if (i915_semaphores >= 0)
+               return i915_semaphores;
+
+       /* Disable semaphores on SNB */
+       if (INTEL_INFO(dev)->gen == 6)
+               return 0;
+
+       return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                return 0;
 
        /* XXX gpu semaphores are implicated in various hard hangs on SNB */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+       if (!intel_enable_semaphores(obj->base.dev))
                return i915_gem_object_wait_rendering(obj);
 
        idx = intel_ring_sync_index(from, to);
index 9ee2729fe5c65ec3690a1f9dcf403045c79a4907..b40004b559771dc0244814335d44fb1537625087 100644 (file)
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
index 5a09416e611f566774e18ad7095d3c007ea1876b..a26d5b0a36908c3d7ddd5c9c7407921fd2826db9 100644 (file)
  */
 #define   PP_READY             (1 << 30)
 #define   PP_SEQUENCE_NONE     (0 << 28)
-#define   PP_SEQUENCE_ON       (1 << 28)
-#define   PP_SEQUENCE_OFF      (2 << 28)
-#define   PP_SEQUENCE_MASK     0x30000000
+#define   PP_SEQUENCE_POWER_UP (1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define   PP_SEQUENCE_MASK     (3 << 28)
+#define   PP_SEQUENCE_SHIFT    28
 #define   PP_CYCLE_DELAY_ACTIVE        (1 << 27)
-#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
 #define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_SEQUENCE_STATE_OFF_IDLE   (0x0 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1   (0x1 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_2   (0x2 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_3   (0x3 << 0)
+#define   PP_SEQUENCE_STATE_ON_IDLE    (0x8 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_0    (0x9 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_2    (0xa << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_3    (0xb << 0)
+#define   PP_SEQUENCE_STATE_RESET      (0xf << 0)
 #define PP_CONTROL     0x61204
 #define   POWER_TARGET_ON      (1 << 0)
 #define PP_ON_DELAYS   0x61208
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)      ((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
+#define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK                      0x130040
+#define  ECOBUS                                        0xa180
+#define    FORCEWAKE_MT_ENABLE                 (1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
 
+#define GEN6_UCGCTL2                           0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE              (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE               (1 << 11)
+
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
index f8f602d76650177c1d7b3a11c9cac3b185fee1bf..7886e4fb60e3e23fb283461a690dbe43a928fc6e 100644 (file)
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
index 981b1f1c04d8da4459f9ae776cf0681576eb915f..daa5743ccbd63ad87a45697bf49b13fb5538d9e1 100644 (file)
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) &&
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+           (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
@@ -4669,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4680,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  *    Displays may support a restricted set as well, check EDID and clamp as
  *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-                                        unsigned int *pipe_bpp)
+                                        unsigned int *pipe_bpp,
+                                        struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4711,7 +4715,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                                lvds_bpc = 6;
 
                        if (lvds_bpc < display_bpc) {
-                               DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+                               DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
                                display_bpc = lvds_bpc;
                        }
                        continue;
@@ -4722,7 +4726,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
                        if (edp_bpc < display_bpc) {
-                               DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+                               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
                                display_bpc = edp_bpc;
                        }
                        continue;
@@ -4737,7 +4741,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        /* Don't use an invalid EDID bpc value */
                        if (connector->display_info.bpc &&
                            connector->display_info.bpc < display_bpc) {
-                               DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+                               DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
                                display_bpc = connector->display_info.bpc;
                        }
                }
@@ -4748,15 +4752,20 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                 */
                if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
                        if (display_bpc > 8 && display_bpc < 12) {
-                               DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+                               DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
                                display_bpc = 12;
                        } else {
-                               DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+                               DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
                                display_bpc = 8;
                        }
                }
        }
 
+       if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+               DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+               display_bpc = 6;
+       }
+
        /*
         * We could just drive the pipe at the highest bpc all the time and
         * enable dithering as needed, but that costs bandwidth.  So choose
@@ -4789,8 +4798,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 
        display_bpc = min(display_bpc, bpc);
 
-       DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
-                        bpc, display_bpc);
+       DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+                     bpc, display_bpc);
 
        *pipe_bpp = display_bpc * 3;
 
@@ -5018,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
 
+       /* default to 8bpc */
+       pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+       if (is_dp) {
+               if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+                       pipeconf |= PIPECONF_BPP_6 |
+                                   PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+               }
+       }
+
        dpll |= DPLL_VCO_ENABLE;
 
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5479,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        /* determine panel color depth */
        temp = I915_READ(PIPECONF(pipe));
        temp &= ~PIPE_BPC_MASK;
-       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
        switch (pipe_bpp) {
        case 18:
                temp |= PIPE_6BPC;
@@ -5671,7 +5690,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
        if ((is_lvds && dev_priv->lvds_dither) || dither) {
                pipeconf |= PIPECONF_DITHER_EN;
-               pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+               pipeconf |= PIPECONF_DITHER_TYPE_SP;
        }
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -7188,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
+       ret = drm_vblank_get(dev, intel_crtc->pipe);
+       if (ret)
+               goto free_work;
+
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
+               drm_vblank_put(dev, intel_crtc->pipe);
 
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
@@ -7211,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->fb = fb;
 
-       ret = drm_vblank_get(dev, intel_crtc->pipe);
-       if (ret)
-               goto cleanup_objs;
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
@@ -7237,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
@@ -7246,6 +7265,8 @@ cleanup_objs:
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
        kfree(work);
 
        return ret;
@@ -7886,6 +7907,31 @@ void intel_init_emon(struct drm_device *dev)
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+       /*
+        * Respect the kernel parameter if it is set
+        */
+       if (i915_enable_rc6 >= 0)
+               return i915_enable_rc6;
+
+       /*
+        * Disable RC6 on Ironlake
+        */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
+
+       /*
+        * Disable rc6 on Sandybridge
+        */
+       if (INTEL_INFO(dev)->gen == 6) {
+               DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+               return 0;
+       }
+       DRM_DEBUG_DRIVER("RC6 enabled\n");
+       return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7922,7 +7968,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       if (i915_enable_rc6)
+       if (intel_enable_rc6(dev_priv->dev))
                rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
                        GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8148,6 +8194,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
+       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+        * gating disable must be set.  Failure to set it results in
+        * flickering pixels due to Z write ordering failures after
+        * some amount of runtime in the Mesa "fire" demo, and Unigine
+        * Sanctuary and Tropics, and apparently anything else with
+        * alpha test or pixel discard.
+        *
+        * According to the spec, bit 11 (RCCUNIT) must also be set,
+        * but we didn't debug actual testcases to find it out.
+        */
+       I915_WRITE(GEN6_UCGCTL2,
+                  GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
        /*
         * According to the spec the following bits should be
         * set in order to enable memory self-refresh and fbc:
@@ -8357,7 +8417,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
        /* rc6 disabled by default due to repeated reports of hanging during
         * boot and resume.
         */
-       if (!i915_enable_rc6)
+       if (!intel_enable_rc6(dev))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -8476,6 +8536,28 @@ static void intel_init_display(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+               dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+               /* IVB configs may use multi-threaded forcewake */
+               if (IS_IVYBRIDGE(dev)) {
+                       u32     ecobus;
+
+                       mutex_lock(&dev->struct_mutex);
+                       __gen6_gt_force_wake_mt_get(dev_priv);
+                       ecobus = I915_READ(ECOBUS);
+                       __gen6_gt_force_wake_mt_put(dev_priv);
+                       mutex_unlock(&dev->struct_mutex);
+
+                       if (ecobus & FORCEWAKE_MT_ENABLE) {
+                               DRM_DEBUG_KMS("Using MT version of forcewake\n");
+                               dev_priv->display.force_wake_get =
+                                       __gen6_gt_force_wake_mt_get;
+                               dev_priv->display.force_wake_put =
+                                       __gen6_gt_force_wake_mt_put;
+                       }
+               }
+
                if (HAS_PCH_IBX(dev))
                        dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
                else if (HAS_PCH_CPT(dev))
index 09b318b0227f3571402c479c06bb376c6aae7eda..92b041b66e491cbd7775e58458f03fe9b7eee303 100644 (file)
@@ -59,7 +59,6 @@ struct intel_dp {
        struct i2c_algo_dp_aux_data algo;
        bool is_pch_edp;
        uint8_t train_set[4];
-       uint8_t link_status[DP_LINK_STATUS_SIZE];
        int panel_power_up_delay;
        int panel_power_down_delay;
        int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
        struct drm_display_mode *panel_fixed_mode;  /* for eDP */
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
-       unsigned long panel_off_jiffies;
 };
 
 /**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
 static int
 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
-       int max_lane_count = 4;
-
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
-               max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-               switch (max_lane_count) {
-               case 1: case 2: case 4:
-                       break;
-               default:
-                       max_lane_count = 4;
-               }
+       int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+       switch (max_lane_count) {
+       case 1: case 2: case 4:
+               break;
+       default:
+               max_lane_count = 4;
        }
        return max_lane_count;
 }
@@ -214,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
        struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int bpp = 24;
 
-       if (intel_crtc)
+       if (check_bpp)
+               bpp = check_bpp;
+       else if (intel_crtc)
                bpp = intel_crtc->bpp;
 
        return (pixel_clock * bpp + 9) / 10;
@@ -239,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
        int max_lanes = intel_dp_max_lane_count(intel_dp);
+       int max_rate, mode_rate;
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
                if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -248,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
        }
 
-       if (intel_dp_link_required(intel_dp, mode->clock)
-           > intel_dp_max_data_rate(max_link_clock, max_lanes))
-               return MODE_CLOCK_HIGH;
+       mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+       if (mode_rate > max_rate) {
+                       mode_rate = intel_dp_link_required(intel_dp,
+                                                          mode->clock, 18);
+                       if (mode_rate > max_rate)
+                               return MODE_CLOCK_HIGH;
+                       else
+                               mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+       }
 
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
@@ -368,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_GEN6(dev))
-                       aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+               if (IS_GEN6(dev) || IS_GEN7(dev))
+                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
@@ -678,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -695,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-                       if (intel_dp_link_required(intel_dp, mode->clock)
+                       if (intel_dp_link_required(intel_dp, mode->clock, bpp)
                                        <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
@@ -768,12 +774,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                        continue;
 
                intel_dp = enc_to_intel_dp(encoder);
-               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+                   intel_dp->base.type == INTEL_OUTPUT_EDP)
+               {
                        lane_count = intel_dp->lane_count;
                        break;
-               } else if (is_edp(intel_dp)) {
-                       lane_count = dev_priv->edp.lanes;
-                       break;
                }
        }
 
@@ -810,6 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +828,32 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                        ironlake_edp_pll_off(encoder);
        }
 
-       intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-       intel_dp->DP |= intel_dp->color_range;
+       /*
+        * There are four kinds of DP registers:
+        *
+        *      IBX PCH
+        *      SNB CPU
+        *      IVB CPU
+        *      CPT PCH
+        *
+        * IBX PCH and CPU are the same for almost everything,
+        * except that the CPU DP PLL is configured in this
+        * register
+        *
+        * CPT PCH is quite different, having many bits moved
+        * to the TRANS_DP_CTL register instead. That
+        * configuration happens (oddly) in ironlake_pch_enable
+        */
+
+       /* Preserve the BIOS-computed detected bit. This is
+        * supposed to be read-only.
+        */
+       intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+       intel_dp->DP |=  DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-               intel_dp->DP |= DP_SYNC_HS_HIGH;
-       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-               intel_dp->DP |= DP_SYNC_VS_HIGH;
+       /* Handle DP bits in common between all three register formats */
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-       else
-               intel_dp->DP |= DP_LINK_TRAIN_OFF;
+       intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
        switch (intel_dp->lane_count) {
        case 1:
@@ -852,59 +872,124 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(encoder, adjusted_mode);
        }
-
        memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
        intel_dp->link_configuration[0] = intel_dp->link_bw;
        intel_dp->link_configuration[1] = intel_dp->lane_count;
        intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
        /*
         * Check for DPCD version > 1.1 and enhanced framing support
         */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
                intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-               intel_dp->DP |= DP_ENHANCED_FRAMING;
        }
 
-       /* CPT DP's pipe select is decided in TRANS_DP_CTL */
-       if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
-               intel_dp->DP |= DP_PIPEB_SELECT;
+       /* Split out the IBX/CPU vs CPT settings */
+
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       intel_dp->DP |= DP_SYNC_HS_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       intel_dp->DP |= DP_SYNC_VS_HIGH;
+               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+                       intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+               intel_dp->DP |= intel_crtc->pipe << 29;
 
-       if (is_cpu_edp(intel_dp)) {
                /* don't miss out required setting for eDP */
                intel_dp->DP |= DP_PLL_ENABLE;
                if (adjusted_mode->clock < 200000)
                        intel_dp->DP |= DP_PLL_FREQ_160MHZ;
                else
                        intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+       } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+               intel_dp->DP |= intel_dp->color_range;
+
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       intel_dp->DP |= DP_SYNC_HS_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       intel_dp->DP |= DP_SYNC_VS_HIGH;
+               intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+                       intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+               if (intel_crtc->pipe == 1)
+                       intel_dp->DP |= DP_PIPEB_SELECT;
+
+               if (is_cpu_edp(intel_dp)) {
+                       /* don't miss out required setting for eDP */
+                       intel_dp->DP |= DP_PLL_ENABLE;
+                       if (adjusted_mode->clock < 200000)
+                               intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+                       else
+                               intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+               }
+       } else {
+               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
 }
 
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+#define IDLE_ON_MASK           (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE          (PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK          (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE         (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK                (PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE       (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+                                      u32 mask,
+                                      u32 value)
 {
-       unsigned long   off_time;
-       unsigned long   delay;
+       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       DRM_DEBUG_KMS("Wait for panel power off time\n");
+       DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+                     mask, value,
+                     I915_READ(PCH_PP_STATUS),
+                     I915_READ(PCH_PP_CONTROL));
 
-       if (ironlake_edp_have_panel_power(intel_dp) ||
-           ironlake_edp_have_panel_vdd(intel_dp))
-       {
-               DRM_DEBUG_KMS("Panel still on, no delay needed\n");
-               return;
+       if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+               DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+                         I915_READ(PCH_PP_STATUS),
+                         I915_READ(PCH_PP_CONTROL));
        }
+}
 
-       off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
-       if (time_after(jiffies, off_time)) {
-               DRM_DEBUG_KMS("Time already passed");
-               return;
-       }
-       delay = jiffies_to_msecs(off_time - jiffies);
-       if (delay > intel_dp->panel_power_down_delay)
-               delay = intel_dp->panel_power_down_delay;
-       DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
-       msleep(delay);
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+       DRM_DEBUG_KMS("Wait for panel power on\n");
+       ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+       DRM_DEBUG_KMS("Wait for panel power off time\n");
+       ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+       DRM_DEBUG_KMS("Wait for panel power cycle\n");
+       ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+       u32     control = I915_READ(PCH_PP_CONTROL);
+
+       control &= ~PANEL_UNLOCK_MASK;
+       control |= PANEL_UNLOCK_REGS;
+       return control;
 }
 
 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
@@ -921,15 +1006,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
             "eDP VDD already requested on\n");
 
        intel_dp->want_panel_vdd = true;
+
        if (ironlake_edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("eDP VDD already on\n");
                return;
        }
 
-       ironlake_wait_panel_off(intel_dp);
-       pp = I915_READ(PCH_PP_CONTROL);
-       pp &= ~PANEL_UNLOCK_MASK;
-       pp |= PANEL_UNLOCK_REGS;
+       if (!ironlake_edp_have_panel_power(intel_dp))
+               ironlake_wait_panel_power_cycle(intel_dp);
+
+       pp = ironlake_get_pp_control(dev_priv);
        pp |= EDP_FORCE_VDD;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1038,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
        u32 pp;
 
        if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-               pp = I915_READ(PCH_PP_CONTROL);
-               pp &= ~PANEL_UNLOCK_MASK;
-               pp |= PANEL_UNLOCK_REGS;
+               pp = ironlake_get_pp_control(dev_priv);
                pp &= ~EDP_FORCE_VDD;
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1046,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
                /* Make sure sequencer is idle before allowing subsequent activity */
                DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
                              I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-               intel_dp->panel_off_jiffies = jiffies;
+
+               msleep(intel_dp->panel_power_down_delay);
        }
 }
 
@@ -972,9 +1057,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp->base.base.dev;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->mode_config.mutex);
        ironlake_panel_vdd_off_sync(intel_dp);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1069,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
        DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
        WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-       
+
        intel_dp->want_panel_vdd = false;
 
        if (sync) {
@@ -1000,23 +1085,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
        }
 }
 
-/* Returns true if the panel was already on when called */
 static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+       u32 pp;
 
        if (!is_edp(intel_dp))
                return;
-       if (ironlake_edp_have_panel_power(intel_dp))
+
+       DRM_DEBUG_KMS("Turn eDP power on\n");
+
+       if (ironlake_edp_have_panel_power(intel_dp)) {
+               DRM_DEBUG_KMS("eDP power already on\n");
                return;
+       }
 
-       ironlake_wait_panel_off(intel_dp);
-       pp = I915_READ(PCH_PP_CONTROL);
-       pp &= ~PANEL_UNLOCK_MASK;
-       pp |= PANEL_UNLOCK_REGS;
+       ironlake_wait_panel_power_cycle(intel_dp);
 
+       pp = ironlake_get_pp_control(dev_priv);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1112,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        }
 
        pp |= POWER_TARGET_ON;
+       if (!IS_GEN5(dev))
+               pp |= PANEL_POWER_RESET;
+
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
 
-       if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
-                    5000))
-               DRM_ERROR("panel on wait timed out: 0x%08x\n",
-                         I915_READ(PCH_PP_STATUS));
+       ironlake_wait_panel_on(intel_dp);
 
        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1127,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        }
 }
 
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
-               PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+       u32 pp;
 
        if (!is_edp(intel_dp))
                return;
-       pp = I915_READ(PCH_PP_CONTROL);
-       pp &= ~PANEL_UNLOCK_MASK;
-       pp |= PANEL_UNLOCK_REGS;
 
-       if (IS_GEN5(dev)) {
-               /* ILK workaround: disable reset around power sequence */
-               pp &= ~PANEL_POWER_RESET;
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
-       }
+       DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       intel_dp->panel_off_jiffies = jiffies;
+       WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
 
-       if (IS_GEN5(dev)) {
-               pp &= ~POWER_TARGET_ON;
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
-               pp &= ~POWER_TARGET_ON;
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
-               msleep(intel_dp->panel_power_cycle_delay);
-
-               if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
-                       DRM_ERROR("panel off wait timed out: 0x%08x\n",
-                                 I915_READ(PCH_PP_STATUS));
+       pp = ironlake_get_pp_control(dev_priv);
+       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       I915_WRITE(PCH_PP_CONTROL, pp);
+       POSTING_READ(PCH_PP_CONTROL);
 
-               pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
-       }
+       ironlake_wait_panel_off(intel_dp);
 }
 
 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1165,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
         * allowing it to appear.
         */
        msleep(intel_dp->backlight_on_delay);
-       pp = I915_READ(PCH_PP_CONTROL);
-       pp &= ~PANEL_UNLOCK_MASK;
-       pp |= PANEL_UNLOCK_REGS;
+       pp = ironlake_get_pp_control(dev_priv);
        pp |= EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1181,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
                return;
 
        DRM_DEBUG_KMS("\n");
-       pp = I915_READ(PCH_PP_CONTROL);
-       pp &= ~PANEL_UNLOCK_MASK;
-       pp |= PANEL_UNLOCK_REGS;
+       pp = ironlake_get_pp_control(dev_priv);
        pp &= ~EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1249,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+       ironlake_edp_backlight_off(intel_dp);
+       ironlake_edp_panel_off(intel_dp);
+
        /* Wake up the sink first */
        ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_link_down(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, false);
 
        /* Make sure the panel is off before trying to
         * change the mode
         */
-       ironlake_edp_backlight_off(intel_dp);
-       intel_dp_link_down(intel_dp);
-       ironlake_edp_panel_off(encoder);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1274,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
        intel_dp_start_link_train(intel_dp);
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
-
        intel_dp_complete_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
 
@@ -1230,16 +1292,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (mode != DRM_MODE_DPMS_ON) {
+               ironlake_edp_backlight_off(intel_dp);
+               ironlake_edp_panel_off(intel_dp);
+
                ironlake_edp_panel_vdd_on(intel_dp);
-               if (is_edp(intel_dp))
-                       ironlake_edp_backlight_off(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
-               ironlake_edp_panel_off(encoder);
-               if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
-                       ironlake_edp_pll_off(encoder);
                ironlake_edp_panel_vdd_off(intel_dp, false);
+
+               if (is_cpu_edp(intel_dp))
+                       ironlake_edp_pll_off(encoder);
        } else {
+               if (is_cpu_edp(intel_dp))
+                       ironlake_edp_pll_on(encoder);
+
                ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1313,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
                        ironlake_edp_panel_on(intel_dp);
                        ironlake_edp_panel_vdd_off(intel_dp, true);
                        intel_dp_complete_link_train(intel_dp);
-                       ironlake_edp_backlight_on(intel_dp);
                } else
                        ironlake_edp_panel_vdd_off(intel_dp, false);
                ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1350,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
        return intel_dp_aux_native_read_retry(intel_dp,
                                              DP_LANE0_1_STATUS,
-                                             intel_dp->link_status,
+                                             link_status,
                                              DP_LINK_STATUS_SIZE);
 }
 
@@ -1301,27 +1366,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 }
 
 static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
                                 int lane)
 {
-       int         i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
        int         s = ((lane & 1) ?
                         DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
                         DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-       uint8_t l = intel_dp_link_status(link_status, i);
+       uint8_t l = adjust_request[lane>>1];
 
        return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 }
 
 static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
                                      int lane)
 {
-       int         i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
        int         s = ((lane & 1) ?
                         DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
                         DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-       uint8_t l = intel_dp_link_status(link_status, i);
+       uint8_t l = adjust_request[lane>>1];
 
        return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
@@ -1343,34 +1406,63 @@ static char     *link_train_names[] = {
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX        DP_TRAIN_VOLTAGE_SWING_800
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_800;
+       else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_1200;
+       else
+               return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
+       } else {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_1200:
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
        }
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
+       uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+       uint8_t voltage_max;
+       uint8_t preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
-               uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
-               uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+               uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+               uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
 
                if (this_v > v)
                        v = this_v;
@@ -1378,18 +1470,20 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
                        p = this_p;
        }
 
-       if (v >= I830_DP_VOLTAGE_MAX)
-               v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+       voltage_max = intel_dp_voltage_max(intel_dp);
+       if (v >= voltage_max)
+               v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-       if (p >= intel_dp_pre_emphasis_max(v))
-               p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+       if (p >= preemph_max)
+               p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        for (lane = 0; lane < 4; lane++)
                intel_dp->train_set[lane] = v | p;
 }
 
 static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
 {
        uint32_t        signal_levels = 0;
 
@@ -1454,13 +1548,43 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       switch (signal_levels) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_400MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_600MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_800MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+                             "0x%x\n", signal_levels);
+               return EDP_LINK_TRAIN_500MV_0DB_IVB;
+       }
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
 {
-       int i = DP_LANE0_1_STATUS + (lane >> 1);
        int s = (lane & 1) * 4;
-       uint8_t l = intel_dp_link_status(link_status, i);
+       uint8_t l = link_status[lane>>1];
 
        return (l >> s) & 0xf;
 }
@@ -1485,18 +1609,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
                         DP_LANE_CHANNEL_EQ_DONE|\
                         DP_LANE_SYMBOL_LOCKED)
 static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp)
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
        uint8_t lane_align;
        uint8_t lane_status;
        int lane;
 
-       lane_align = intel_dp_link_status(intel_dp->link_status,
+       lane_align = intel_dp_link_status(link_status,
                                          DP_LANE_ALIGN_STATUS_UPDATED);
        if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
                return false;
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
-               lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+               lane_status = intel_get_lane_status(link_status, lane);
                if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
                        return false;
        }
@@ -1521,8 +1645,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 
        ret = intel_dp_aux_native_write(intel_dp,
                                        DP_TRAINING_LANE0_SET,
-                                       intel_dp->train_set, 4);
-       if (ret != 4)
+                                       intel_dp->train_set,
+                                       intel_dp->lane_count);
+       if (ret != intel_dp->lane_count)
                return false;
 
        return true;
@@ -1538,7 +1663,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        int i;
        uint8_t voltage;
        bool clock_recovery = false;
-       int tries;
+       int voltage_tries, loop_tries;
        u32 reg;
        uint32_t DP = intel_dp->DP;
 
@@ -1559,26 +1684,35 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                                  DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
        else
                DP &= ~DP_LINK_TRAIN_MASK;
        memset(intel_dp->train_set, 0, 4);
        voltage = 0xff;
-       tries = 0;
+       voltage_tries = 0;
+       loop_tries = 0;
        clock_recovery = false;
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+               uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
-               if (IS_GEN6(dev) && is_edp(intel_dp)) {
+
+
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
-                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+                       DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1590,10 +1724,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                /* Set training pattern 1 */
 
                udelay(100);
-               if (!intel_dp_get_link_status(intel_dp))
+               if (!intel_dp_get_link_status(intel_dp, link_status)) {
+                       DRM_ERROR("failed to get link status\n");
                        break;
+               }
 
-               if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+               if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+                       DRM_DEBUG_KMS("clock recovery OK\n");
                        clock_recovery = true;
                        break;
                }
@@ -1602,20 +1739,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
-               if (i == intel_dp->lane_count)
-                       break;
+               if (i == intel_dp->lane_count) {
+                       ++loop_tries;
+                       if (loop_tries == 5) {
+                               DRM_DEBUG_KMS("too many full retries, give up\n");
+                               break;
+                       }
+                       memset(intel_dp->train_set, 0, 4);
+                       voltage_tries = 0;
+                       continue;
+               }
 
                /* Check to see if we've tried the same voltage 5 times */
                if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-                       ++tries;
-                       if (tries == 5)
+                       ++voltage_tries;
+                       if (voltage_tries == 5) {
+                               DRM_DEBUG_KMS("too many voltage retries, give up\n");
                                break;
+                       }
                } else
-                       tries = 0;
+                       voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
                /* Compute new intel_dp->train_set as requested by target */
-               intel_get_adjust_train(intel_dp);
+               intel_get_adjust_train(intel_dp, link_status);
        }
 
        intel_dp->DP = DP;
@@ -1638,6 +1785,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint32_t    signal_levels;
+               uint8_t     link_status[DP_LINK_STATUS_SIZE];
 
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,15 +1793,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               if (IS_GEN6(dev) && is_edp(intel_dp)) {
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
-                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1665,17 +1816,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
 
                udelay(400);
-               if (!intel_dp_get_link_status(intel_dp))
+               if (!intel_dp_get_link_status(intel_dp, link_status))
                        break;
 
                /* Make sure clock is still ok */
-               if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+               if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        cr_tries++;
                        continue;
                }
 
-               if (intel_channel_eq_ok(intel_dp)) {
+               if (intel_channel_eq_ok(intel_dp, link_status)) {
                        channel_eq = true;
                        break;
                }
@@ -1690,11 +1841,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                }
 
                /* Compute new intel_dp->train_set as requested by target */
-               intel_get_adjust_train(intel_dp);
+               intel_get_adjust_train(intel_dp, link_status);
                ++tries;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                reg = DP | DP_LINK_TRAIN_OFF_CPT;
        else
                reg = DP | DP_LINK_TRAIN_OFF;
@@ -1724,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                udelay(100);
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
@@ -1735,8 +1886,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
        msleep(17);
 
-       if (is_edp(intel_dp))
-               DP |= DP_LINK_TRAIN_OFF;
+       if (is_edp(intel_dp)) {
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+                       DP |= DP_LINK_TRAIN_OFF_CPT;
+               else
+                       DP |= DP_LINK_TRAIN_OFF;
+       }
 
        if (!HAS_PCH_CPT(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1977,7 @@ static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
        u8 sink_irq_vector;
+       u8 link_status[DP_LINK_STATUS_SIZE];
 
        if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
                return;
@@ -1830,7 +1986,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                return;
 
        /* Try to read receiver status if the link appears to be up */
-       if (!intel_dp_get_link_status(intel_dp)) {
+       if (!intel_dp_get_link_status(intel_dp, link_status)) {
                intel_dp_link_down(intel_dp);
                return;
        }
@@ -1855,7 +2011,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }
 
-       if (!intel_channel_eq_ok(intel_dp)) {
+       if (!intel_channel_eq_ok(intel_dp, link_status)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              drm_get_encoder_name(&intel_dp->base.base));
                intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2335,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
                        continue;
 
                intel_dp = enc_to_intel_dp(encoder);
-               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+                   intel_dp->base.type == INTEL_OUTPUT_EDP)
                        return intel_dp->output_reg;
        }
 
@@ -2321,7 +2478,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
                cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                        PANEL_LIGHT_ON_DELAY_SHIFT;
-               
+
                cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                        PANEL_LIGHT_OFF_DELAY_SHIFT;
 
@@ -2354,11 +2511,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                              intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
-               intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
-
                ironlake_edp_panel_vdd_on(intel_dp);
                ret = intel_dp_get_dpcd(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);
+
                if (ret) {
                        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                                dev_priv->no_aux_handshake =
index bd9a604b73da2f07aefdfae712135a5c256f18f7..a1b4343814e8faac807384057ba51a541746b6bd 100644 (file)
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
index 42f165a520de440880b574c70f1ca4aa4b7b5282..e44191132ac4e97307029e44a50d65e7a96be53a 100644 (file)
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Asus AT5NM10T-I",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 499d4c0dbeebd93f944d338ffc29184ed5103617..04d79fd1dc9d9e1477ee2592ac0fb2212883e180 100644 (file)
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
        } else {
-               if (IS_PINEVIEW(dev)) {
+               if (INTEL_INFO(dev)->gen < 4)
                        max >>= 17;
-               } else {
+               else
                        max >>= 16;
-                       if (INTEL_INFO(dev)->gen < 4)
-                               max &= ~1;
-               }
 
                if (is_backlight_combination_mode(dev))
                        max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-               if (IS_PINEVIEW(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        val >>= 1;
 
                if (is_backlight_combination_mode(dev)) {
                        u8 lbpc;
 
-                       val &= ~1;
                        pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
                        val *= lbpc;
                }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
-       if (IS_PINEVIEW(dev)) {
-               tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+       if (INTEL_INFO(dev)->gen < 4) 
                level <<= 1;
-       } else
-               tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
@@ -326,7 +320,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
-       return intel_panel_get_backlight(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return dev_priv->backlight_level;
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
index 3003fb25aefde4ac697a5d2694d825faec76fe0d..f7b9268df2666831795835c1f378a93cf1340379 100644 (file)
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                }
                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
        }
-       if (intel_crtc->pipe == 1)
-               sdvox |= SDVO_PIPE_B_SELECT;
+
+       if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+               sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+       else
+               sdvox |= TRANSCODER(intel_crtc->pipe);
+
        if (intel_sdvo->has_hdmi_audio)
                sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
        return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+                                 struct edid *edid)
+{
+       bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+       bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+       DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+                     connector_is_digital, monitor_is_digital);
+       return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
                if (edid == NULL)
                        edid = intel_sdvo_get_analog_edid(connector);
                if (edid != NULL) {
-                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                               ret = connector_status_disconnected;
-                       else
+                       if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+                                                             edid))
                                ret = connector_status_connected;
+                       else
+                               ret = connector_status_disconnected;
+
                        connector->display_info.raw_edid = NULL;
                        kfree(edid);
                } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                edid = intel_sdvo_get_analog_edid(connector);
 
        if (edid != NULL) {
-               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-               bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-               if (connector_is_digital == monitor_is_digital) {
+               if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+                                                     edid)) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
index ddbabefb4273ffa0fe071d49422dd448d21875d9..b12fd2c80812d002e0840787e8ff47481a3691a2 100644 (file)
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct nouveau_bo *bo;
+       int ret;
+
+       args->pitch = roundup(args->width * (args->bpp / 8), 256);
+       args->size = args->pitch * args->height;
+       args->size = roundup(args->size, PAGE_SIZE);
+
+       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       if (ret)
+               return ret;
+
+       ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+       drm_gem_object_unreference_unlocked(bo->gem);
+       return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               uint32_t handle, uint64_t *poffset)
+{
+       struct drm_gem_object *gem;
+
+       gem = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gem) {
+               struct nouveau_bo *bo = gem->driver_private;
+               *poffset = bo->bo.addr_space_offset;
+               drm_gem_object_unreference_unlocked(gem);
+               return 0;
+       }
+
+       return -ENOENT;
+}
index 9f7bb12952623b51bed52e21148ec04dcca902af..9791d13c9e3b8d9d223bd5bb1cae182de4bc89ee 100644 (file)
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
 
+       .dumb_create = nouveau_display_dumb_create,
+       .dumb_map_offset = nouveau_display_dumb_map_offset,
+       .dumb_destroy = nouveau_display_dumb_destroy,
+
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
 #ifdef GIT_REVISION
index 29837da1098b3a85a50bbc9ee4ccc2b0a4901dab..4c0be3a4ed882f5430ea628ba4d80fef1f1d3690 100644 (file)
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
                             struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+                               struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+                                   uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+                                uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
index 02222c540aee1a38f963dd13eeda7f0f7c7e8844..960c0ae0c0c3de650dac30beaec208b9bfd3558d 100644 (file)
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
                return ret;
        }
 
-       ret = drm_mm_init(&chan->ramin_heap, base, size);
+       ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
index b75258a9fe44d544521431133ecd64ec0136637d..c8a463b76c89f03f96f68e48107d9993d2a2bda1 100644 (file)
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
+               nvbe->unmap_pages = false;
        }
+
+       nvbe->pages = NULL;
 }
 
 static void
index d23ca00e7d627c65e3814891c0a8aa0136f58395..06de250fe617df89ad4e05a34d3e05be8907f126 100644 (file)
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), mc;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
        struct dcb_entry *dcb;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        dcb = disp->irq.dcb;
index a74e501afd25b44b7315d497f8ca30908dcb3995..ecfafd70cf0ed2b6f9ee0b51c74031caa337de27 100644 (file)
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
        u8  tpnr[GPC_MAX];
        int i, gpc, tpc;
 
+       nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
        /*
         *      TP      ROP UNKVAL(magic_not_rop_nr)
         * 450: 4/0/0/0 2        3
index 23d63b4b3d77078ce4a351f82b8d7cba16d280b7..cb006a718e700f2c72cf06dfdac0c7f22e8cbdac 100644 (file)
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
                        continue;
 
                if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->or) {
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
                        if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
                                return;
                        break;
index 87631fede1f8ed2a750419200688c151f552e7e6..2b97262e3ab14af5af32b4d85711828563450b09 100644 (file)
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       if (tiling_flags & RADEON_TILING_MACRO)
+       if (tiling_flags & RADEON_TILING_MACRO) {
+               if (rdev->family >= CHIP_CAYMAN)
+                       tmp = rdev->config.cayman.tile_config;
+               else
+                       tmp = rdev->config.evergreen.tile_config;
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0: /* 4 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                       break;
+               case 1: /* 8 banks */
+               default:
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                       break;
+               case 2: /* 16 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0: /* 1KB rows */
+               default:
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+                       break;
+               case 1: /* 2KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+                       break;
+               case 2: /* 4KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+                       break;
+               }
+
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-       else if (tiling_flags & RADEON_TILING_MICRO)
+       else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        switch (radeon_crtc->crtc_id) {
index 1d603a3335db65b4bf425228a8f98221d01fcd48..92c9628c572daa2495a685b0a7898273957ffc73 100644 (file)
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -3271,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
                        rdev->accel_working = false;
                }
        }
+
+       /* Don't start up if the MC ucode is missing on BTC parts.
+        * The default clocks and voltages before the MC ucode
+        * is loaded are not suffient for advanced operations.
+        */
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+                       DRM_ERROR("radeon: MC ucode required for NI+.\n");
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
index 7fdfa8ea7570654b6d3faf70e410e5ff6b9554d7..cd4590aae154154d62906d901a63b8b368fea861 100644 (file)
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
+       u32                     row_size;
        /* value we track */
        u32                     nsamples;
        u32                     cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
        struct radeon_bo        *db_s_write_bo;
 };
 
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+       if (tiling_flags & RADEON_TILING_MACRO)
+               return ARRAY_2D_TILED_THIN1;
+       else if (tiling_flags & RADEON_TILING_MICRO)
+               return ARRAY_1D_TILED_THIN1;
+       else
+               return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+       switch (nbanks) {
+       case 2:
+               return ADDR_SURF_2_BANK;
+       case 4:
+               return ADDR_SURF_4_BANK;
+       case 8:
+       default:
+               return ADDR_SURF_8_BANK;
+       case 16:
+               return ADDR_SURF_16_BANK;
+       }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+       switch (row_size) {
+       case 1:
+       default:
+               return ADDR_SURF_TILE_SPLIT_1KB;
+       case 2:
+               return ADDR_SURF_TILE_SPLIT_2KB;
+       case 4:
+               return ADDR_SURF_TILE_SPLIT_4KB;
+       }
+}
+
 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
 {
        int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                }
                break;
        case DB_Z_INFO:
-               r = evergreen_cs_packet_next_reloc(p, &reloc);
-               if (r) {
-                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
-                                       "0x%04X\n", reg);
-                       return -EINVAL;
-               }
                track->db_z_info = radeon_get_ib_value(p, idx);
-               ib[idx] &= ~Z_ARRAY_MODE(0xf);
-               track->db_z_info &= ~Z_ARRAY_MODE(0xf);
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                       ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-               } else {
-                       ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+               if (!p->keep_tiling_flags) {
+                       r = evergreen_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                               "0x%04X\n", reg);
+                               return -EINVAL;
+                       }
+                       ib[idx] &= ~Z_ARRAY_MODE(0xf);
+                       track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                               ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                               ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+                       }
                }
                break;
        case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR5_INFO:
        case CB_COLOR6_INFO:
        case CB_COLOR7_INFO:
-               r = evergreen_cs_packet_next_reloc(p, &reloc);
-               if (r) {
-                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
-                                       "0x%04X\n", reg);
-                       return -EINVAL;
-               }
                tmp = (reg - CB_COLOR0_INFO) / 0x3c;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                       ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-               } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                       ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+               if (!p->keep_tiling_flags) {
+                       r = evergreen_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                               "0x%04X\n", reg);
+                               return -EINVAL;
+                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR8_INFO:
        case CB_COLOR9_INFO:
        case CB_COLOR10_INFO:
        case CB_COLOR11_INFO:
-               r = evergreen_cs_packet_next_reloc(p, &reloc);
-               if (r) {
-                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
-                                       "0x%04X\n", reg);
-                       return -EINVAL;
-               }
                tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                       ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-               } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                       ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+               if (!p->keep_tiling_flags) {
+                       r = evergreen_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                               "0x%04X\n", reg);
+                               return -EINVAL;
+                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR9_ATTRIB:
        case CB_COLOR10_ATTRIB:
        case CB_COLOR11_ATTRIB:
+               r = evergreen_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                       "0x%04X\n", reg);
+                       return -EINVAL;
+               }
+               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                       ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+               }
                break;
        case CB_COLOR0_DIM:
        case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                                       ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                                       ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                               if (!p->keep_tiling_flags) {
+                                       ib[idx+1+(i*8)+1] |=
+                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                                               ib[idx+1+(i*8)+6] |=
+                                                       TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+                                               ib[idx+1+(i*8)+7] |=
+                                                       TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                                       }
+                               }
                                texture = reloc->robj;
                                /* tex mip base */
                                r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 {
        struct radeon_cs_packet pkt;
        struct evergreen_cs_track *track;
+       u32 tmp;
        int r;
 
        if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               track->npipes = p->rdev->config.evergreen.tiling_npipes;
-               track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
-               track->group_size = p->rdev->config.evergreen.tiling_group_size;
+               if (p->rdev->family >= CHIP_CAYMAN)
+                       tmp = p->rdev->config.cayman.tile_config;
+               else
+                       tmp = p->rdev->config.evergreen.tile_config;
+
+               switch (tmp & 0xf) {
+               case 0:
+                       track->npipes = 1;
+                       break;
+               case 1:
+               default:
+                       track->npipes = 2;
+                       break;
+               case 2:
+                       track->npipes = 4;
+                       break;
+               case 3:
+                       track->npipes = 8;
+                       break;
+               }
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0:
+                       track->nbanks = 4;
+                       break;
+               case 1:
+               default:
+                       track->nbanks = 8;
+                       break;
+               case 2:
+                       track->nbanks = 16;
+                       break;
+               }
+
+               switch ((tmp & 0xf00) >> 8) {
+               case 0:
+                       track->group_size = 256;
+                       break;
+               case 1:
+               default:
+                       track->group_size = 512;
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0:
+                       track->row_size = 1;
+                       break;
+               case 1:
+               default:
+                       track->row_size = 2;
+                       break;
+               case 2:
+                       track->row_size = 4;
+                       break;
+               }
+
                p->track = track;
        }
        do {
index c781c92c3451285a14360f57bf18a53802dca1ab..7d7f2155e34c305729f8487c55fc8c2c741ccc44 100644 (file)
 #       define EVERGREEN_GRPH_DEPTH_8BPP                0
 #       define EVERGREEN_GRPH_DEPTH_16BPP               1
 #       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
 #       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
 /* 8 BPP */
 #       define EVERGREEN_GRPH_FORMAT_INDEXED            0
 #       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
 #       define EVERGREEN_GRPH_FORMAT_RGB111110          6
 #       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
 #       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
index b937c49054d9df9d63fea1279d6a1e123fd899e0..e00039e59a75b2bceb45ff69f63b740fd3193f40 100644 (file)
 #define DB_HTILE_DATA_BASE                             0x28014
 #define DB_Z_INFO                                      0x28040
 #       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
 #define DB_STENCIL_INFO                                        0x28044
 #define DB_Z_READ_BASE                                 0x28048
 #define DB_STENCIL_READ_BASE                           0x2804c
 #      define CB_SF_EXPORT_FULL                        0
 #      define CB_SF_EXPORT_NORM                        1
 #define        CB_COLOR0_ATTRIB                                0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
 #define        CB_COLOR0_DIM                                   0x28c78
 /* only CB0-7 blocks have these regs */
 #define        CB_COLOR0_CMASK                                 0x28c7c
 #      define SQ_SEL_1                                 5
 #define SQ_TEX_RESOURCE_WORD5_0                         0x30014
 #define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
 #define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
 
 #define SQ_VTX_CONSTANT_WORD0_0                                0x30000
 #define SQ_VTX_CONSTANT_WORD1_0                                0x30004
index ad158ea499015e1ceb8ca956c611d9f8944a133d..bfc08f6320f83b83569bec08a2968014c9e90a4f 100644 (file)
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+       int i;
 
        /* Lock the graphics update lock */
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 400b26df652a8305c04d58f46db998672646b150..c93bc64707e1d6e3b59a5e2a7fe632b19e6efe99 100644 (file)
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
 
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                       tile_flags |= R300_TXO_MACRO_TILE;
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                       tile_flags |= R300_TXO_MICRO_TILE;
-               else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-                       tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
-
-               tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
-               tmp |= tile_flags;
-               ib[idx] = tmp;
+               if (p->keep_tiling_flags) {
+                       ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+                                 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+               } else {
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                               tile_flags |= R300_TXO_MACRO_TILE;
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                               tile_flags |= R300_TXO_MICRO_TILE;
+                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                               tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+                       tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+                       tmp |= tile_flags;
+                       ib[idx] = tmp;
+               }
                track->textures[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
-               r = r100_cs_packet_next_reloc(p, &reloc);
-               if (r) {
-                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-                                 idx, reg);
-                       r100_cs_dump_packet(p, pkt);
-                       return r;
-               }
+               if (!p->keep_tiling_flags) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                         idx, reg);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
 
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                       tile_flags |= R300_COLOR_TILE_ENABLE;
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                       tile_flags |= R300_COLOR_MICROTILE_ENABLE;
-               else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-                       tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                               tile_flags |= R300_COLOR_TILE_ENABLE;
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                               tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                               tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
-               tmp = idx_value & ~(0x7 << 16);
-               tmp |= tile_flags;
-               ib[idx] = tmp;
+                       tmp = idx_value & ~(0x7 << 16);
+                       tmp |= tile_flags;
+                       ib[idx] = tmp;
+               }
                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = idx_value & 0x3FFE;
                switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
-               r = r100_cs_packet_next_reloc(p, &reloc);
-               if (r) {
-                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-                                 idx, reg);
-                       r100_cs_dump_packet(p, pkt);
-                       return r;
-               }
-
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                       tile_flags |= R300_DEPTHMACROTILE_ENABLE;
-               if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                       tile_flags |= R300_DEPTHMICROTILE_TILED;
-               else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-                       tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+               if (!p->keep_tiling_flags) {
+                       r = r100_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                         idx, reg);
+                               r100_cs_dump_packet(p, pkt);
+                               return r;
+                       }
 
-               tmp = idx_value & ~(0x7 << 16);
-               tmp |= tile_flags;
-               ib[idx] = tmp;
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                               tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+                       if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                               tile_flags |= R300_DEPTHMICROTILE_TILED;
+                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                               tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
+                       tmp = idx_value & ~(0x7 << 16);
+                       tmp |= tile_flags;
+                       ib[idx] = tmp;
+               }
                track->zb.pitch = idx_value & 0x3FFC;
                track->zb_dirty = true;
                break;
index 0a2e023c15570ffdbd8c78280ac84123b6b17034..cb1acffd24303aca4c49b11e62b74719a527d51b 100644 (file)
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                track->db_depth_control = radeon_get_ib_value(p, idx);
                break;
        case R_028010_DB_DEPTH_INFO:
-               if (r600_cs_packet_next_is_pkt3_nop(p)) {
+               if (!p->keep_tiling_flags &&
+                   r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case R_0280B4_CB_COLOR5_INFO:
        case R_0280B8_CB_COLOR6_INFO:
        case R_0280BC_CB_COLOR7_INFO:
-               if (r600_cs_packet_next_is_pkt3_nop(p)) {
+               if (!p->keep_tiling_flags &&
+                    r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
        mip_offset <<= 8;
 
        word0 = radeon_get_ib_value(p, idx + 0);
-       if (tiling_flags & RADEON_TILING_MACRO)
-               word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-       else if (tiling_flags & RADEON_TILING_MICRO)
-               word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+       if (!p->keep_tiling_flags) {
+               if (tiling_flags & RADEON_TILING_MACRO)
+                       word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+               else if (tiling_flags & RADEON_TILING_MICRO)
+                       word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+       }
        word1 = radeon_get_ib_value(p, idx + 1);
        w0 = G_038000_TEX_WIDTH(word0) + 1;
        h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                                       ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-                               else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                                       ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+                               if (!p->keep_tiling_flags) {
+                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                                               ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+                                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                                               ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+                               }
                                texture = reloc->robj;
                                /* tex mip base */
                                r = r600_cs_packet_next_reloc(p, &reloc);
index fc5a1d642cb509236db4c57c5ac64472c83ebee4..8227e76b5c70a1d1b97bcac741f1559272a2250e 100644 (file)
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
        struct radeon_ib        *ib;
        void                    *track;
        unsigned                family;
-       int parser_error;
+       int                     parser_error;
+       bool                    keep_tiling_flags;
 };
 
 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
index 3f6636bb2d7f874abf0208cff67010f82c2f4e75..3516a6081dcfcc3acb35d4d64286ea0f321da01e 100644 (file)
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+               DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+                                acpi_format_exception(status));
                kfree(buffer.pointer);
                return 1;
        }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
        acpi_handle handle;
        int ret;
 
-       /* No need to proceed if we're sure that ATIF is not supported */
-       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
-               return 0;
-
        /* Get the device handle */
        handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
 
+       /* No need to proceed if we're sure that ATIF is not supported */
+       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+               return 0;
+
        /* Call the ATIF method */
        ret = radeon_atif_call(handle);
        if (ret)
index d2d179267af3fc878d210b40563e55c0a58eeac3..5082d17d14dcda9733ca6d79b8d70513a7bd9944 100644 (file)
@@ -62,6 +62,87 @@ union atom_supported_devices {
        struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+                                         ATOM_GPIO_I2C_ASSIGMENT *gpio,
+                                         u8 index)
+{
+       /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+       if ((rdev->family == CHIP_R420) ||
+           (rdev->family == CHIP_R423) ||
+           (rdev->family == CHIP_RV410)) {
+               if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+                       gpio->ucClkMaskShift = 0x19;
+                       gpio->ucDataMaskShift = 0x18;
+               }
+       }
+
+       /* some evergreen boards have bad data for this entry */
+       if (ASIC_IS_DCE4(rdev)) {
+               if ((index == 7) &&
+                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+                   (gpio->sucI2cId.ucAccess == 0)) {
+                       gpio->sucI2cId.ucAccess = 0x97;
+                       gpio->ucDataMaskShift = 8;
+                       gpio->ucDataEnShift = 8;
+                       gpio->ucDataY_Shift = 8;
+                       gpio->ucDataA_Shift = 8;
+               }
+       }
+
+       /* some DCE3 boards have bad data for this entry */
+       if (ASIC_IS_DCE3(rdev)) {
+               if ((index == 4) &&
+                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+                   (gpio->sucI2cId.ucAccess == 0x94))
+                       gpio->sucI2cId.ucAccess = 0x14;
+       }
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+       struct radeon_i2c_bus_rec i2c;
+
+       memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+       i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+       i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+       i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+       i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+       i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+       i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+       i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+       i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+       i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+       i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+       i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+       i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+       i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+       i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+       i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+       i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+       if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+               i2c.hw_capable = true;
+       else
+               i2c.hw_capable = false;
+
+       if (gpio->sucI2cId.ucAccess == 0xa0)
+               i2c.mm_i2c = true;
+       else
+               i2c.mm_i2c = false;
+
+       i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+       if (i2c.mask_clk_reg)
+               i2c.valid = true;
+       else
+               i2c.valid = false;
+
+       return i2c;
+}
+
 static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
                                                               uint8_t id)
 {
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
 
-                       /* some evergreen boards have bad data for this entry */
-                       if (ASIC_IS_DCE4(rdev)) {
-                               if ((i == 7) &&
-                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
-                                   (gpio->sucI2cId.ucAccess == 0)) {
-                                       gpio->sucI2cId.ucAccess = 0x97;
-                                       gpio->ucDataMaskShift = 8;
-                                       gpio->ucDataEnShift = 8;
-                                       gpio->ucDataY_Shift = 8;
-                                       gpio->ucDataA_Shift = 8;
-                               }
-                       }
-
-                       /* some DCE3 boards have bad data for this entry */
-                       if (ASIC_IS_DCE3(rdev)) {
-                               if ((i == 4) &&
-                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
-                                   (gpio->sucI2cId.ucAccess == 0x94))
-                                       gpio->sucI2cId.ucAccess = 0x14;
-                       }
+                       radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
                        if (gpio->sucI2cId.ucAccess == id) {
-                               i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-                               i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-                               i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-                               i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-                               i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-                               i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-                               i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-                               i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-                               i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-                               i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-                               i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-                               i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-                               i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-                               i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-                               i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-                               i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-                               if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-                                       i2c.hw_capable = true;
-                               else
-                                       i2c.hw_capable = false;
-
-                               if (gpio->sucI2cId.ucAccess == 0xa0)
-                                       i2c.mm_i2c = true;
-                               else
-                                       i2c.mm_i2c = false;
-
-                               i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-                               if (i2c.mask_clk_reg)
-                                       i2c.valid = true;
+                               i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
                                break;
                        }
                }
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
        int i, num_indices;
        char stmp[32];
 
-       memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
        if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
                i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
-                       i2c.valid = false;
-
-                       /* some evergreen boards have bad data for this entry */
-                       if (ASIC_IS_DCE4(rdev)) {
-                               if ((i == 7) &&
-                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
-                                   (gpio->sucI2cId.ucAccess == 0)) {
-                                       gpio->sucI2cId.ucAccess = 0x97;
-                                       gpio->ucDataMaskShift = 8;
-                                       gpio->ucDataEnShift = 8;
-                                       gpio->ucDataY_Shift = 8;
-                                       gpio->ucDataA_Shift = 8;
-                               }
-                       }
-
-                       /* some DCE3 boards have bad data for this entry */
-                       if (ASIC_IS_DCE3(rdev)) {
-                               if ((i == 4) &&
-                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
-                                   (gpio->sucI2cId.ucAccess == 0x94))
-                                       gpio->sucI2cId.ucAccess = 0x14;
-                       }
-
-                       i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-                       i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-                       i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-                       i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-                       i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-                       i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-                       i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-                       i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-                       i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-                       i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-                       i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-                       i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-                       i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-                       i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-                       i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-                       i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-                       if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-                               i2c.hw_capable = true;
-                       else
-                               i2c.hw_capable = false;
 
-                       if (gpio->sucI2cId.ucAccess == 0xa0)
-                               i2c.mm_i2c = true;
-                       else
-                               i2c.mm_i2c = false;
+                       radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
-                       i2c.i2c_id = gpio->sucI2cId.ucAccess;
+                       i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
 
-                       if (i2c.mask_clk_reg) {
-                               i2c.valid = true;
+                       if (i2c.valid) {
                                sprintf(stmp, "0x%x", i2c.i2c_id);
                                rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
                        }
@@ -1996,14 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                return state_index;
        /* last mode is usually default, array is low to high */
        for (i = 0; i < num_modes; i++) {
+               rdev->pm.power_state[state_index].clock_info =
+                       kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+               if (!rdev->pm.power_state[state_index].clock_info)
+                       return state_index;
+               rdev->pm.power_state[state_index].num_clock_modes = 1;
                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
                switch (frev) {
                case 1:
-                       rdev->pm.power_state[state_index].clock_info =
-                               kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
-                       if (!rdev->pm.power_state[state_index].clock_info)
-                               return state_index;
-                       rdev->pm.power_state[state_index].num_clock_modes = 1;
                        rdev->pm.power_state[state_index].clock_info[0].mclk =
                                le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
                        rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2039,11 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                        state_index++;
                        break;
                case 2:
-                       rdev->pm.power_state[state_index].clock_info =
-                               kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
-                       if (!rdev->pm.power_state[state_index].clock_info)
-                               return state_index;
-                       rdev->pm.power_state[state_index].num_clock_modes = 1;
                        rdev->pm.power_state[state_index].clock_info[0].mclk =
                                le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
                        rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2080,11 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                        state_index++;
                        break;
                case 3:
-                       rdev->pm.power_state[state_index].clock_info =
-                               kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
-                       if (!rdev->pm.power_state[state_index].clock_info)
-                               return state_index;
-                       rdev->pm.power_state[state_index].num_clock_modes = 1;
                        rdev->pm.power_state[state_index].clock_info[0].mclk =
                                le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
                        rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2588,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 
        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.current_clock_mode_index = 0;
-       rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       if (rdev->pm.default_power_state_index >= 0)
+               rdev->pm.current_vddc =
+                       rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       else
+               rdev->pm.current_vddc = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
index ccaa243c1442f8fe045df65af570e059baf11fb0..29afd71e0840a0b9995b5e30027577adeec45b92 100644 (file)
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
-       unsigned size, i;
+       unsigned size, i, flags = 0;
 
        if (!cs->num_chunks) {
                return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
+               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+                   !p->chunks[i].length_dw) {
+                       return -EINVAL;
+               }
 
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                               p->chunks[i].user_ptr, size)) {
                                return -EFAULT;
                        }
+                       if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                               flags = p->chunks[i].kdata[0];
+                       }
                } else {
                        p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                          p->chunks[p->chunk_ib_idx].length_dw);
                return -EINVAL;
        }
+
+       p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
        return 0;
 }
 
index a0b35e9094896cf90202d8f54effd942b0398054..71499fc3daf524f8b719692e9ff7aacb6219b2e9 100644 (file)
  *   2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
  *   2.10.0 - fusion 2D tiling
  *   2.11.0 - backend map, initial compute support for the CS checker
+ *   2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       11
+#define KMS_DRIVER_MINOR       12
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 06e413e6a920207850734185f1c459989616f625..4b27efa4405b94b63011b2e8948d678c35ccfd62 100644 (file)
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_TRAVIS:
                case ENCODER_OBJECT_ID_NUTMEG:
-                       return true;
+                       return radeon_encoder->encoder_id;
                default:
-                       return false;
+                       return ENCODER_OBJECT_ID_NONE;
                }
        }
-
-       return false;
+       return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
index 481b99e89f6542d661c4f0697d413a6eff40f821..b1053d64042313df931b9cde933e121bfa814517 100644 (file)
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index a983f410ab89d7d549530d7b7c2f42287a6a7969..23ae1c60ab3d97b8a1576c567054b736fd70eb93 100644 (file)
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 617b64678fc62f24c70df6ae3fd7ba3bc7d6c9bf..0bb0f5f713e6cf9166a1b91eb19ab81b85206716 100644 (file)
@@ -574,10 +574,16 @@ retry:
                return ret;
 
        spin_lock(&glob->lru_lock);
+
+       if (unlikely(list_empty(&bo->ddestroy))) {
+               spin_unlock(&glob->lru_lock);
+               return 0;
+       }
+
        ret = ttm_bo_reserve_locked(bo, interruptible,
                                    no_wait_reserve, false, 0);
 
-       if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
+       if (unlikely(ret != 0)) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }
index 8cca91a93bde092af78c6c7bdcfb0ffceb3f04a6..dc279706ca7027d100761be6a45c54d9001534ac 100644 (file)
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                                 struct ttm_object_file *tfile,
+                                 uint32_t handle,
+                                 struct vmw_surface **out_surf,
+                                 struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
index 03bbc2a6f9a738439a8d8f59a027b87ec86c72db..a0c2f12b1e1b8fffb8e4422c275a91b0eb6a4eaf 100644 (file)
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
+       const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
 
-       hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+       hwversion = ioread32(fifo_mem +
+                            ((fifo->capabilities &
+                              SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                             SVGA_FIFO_3D_HWVERSION_REVISED :
+                             SVGA_FIFO_3D_HWVERSION));
+
        if (hwversion == 0)
                return false;
 
index 3f6343502d1f96aea110dccdeb59a39cb9fe0065..66917c6c3813f6ff6cee579786864421c9dda0d4 100644 (file)
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
-               param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+               const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+               param->value =
+                       ioread32(fifo_mem +
+                                ((fifo->capabilities &
+                                  SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                 SVGA_FIFO_3D_HWVERSION_REVISED :
+                                 SVGA_FIFO_3D_HWVERSION));
                break;
        }
        default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                ret = -EINVAL;
                goto out_no_fb;
        }
-
        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
-               ret = -EINVAL;
-               goto out_no_fb;
-       }
 
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
index 880e285d7578afa3ebeae75de4dad77443d754ce..f94b33ae221546a9d9170f3e0eb6b8292776721b 100644 (file)
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+       int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+                       int num_rects,
+                       struct vmw_clip_rect clip,
+                       SVGASignedRect *out_rects,
+                       int *out_num)
+{
+       int i, k;
+
+       for (i = 0, k = 0; i < num_rects; i++) {
+               int x1 = max_t(int, clip.x1, rects[i].x1);
+               int y1 = max_t(int, clip.y1, rects[i].y1);
+               int x2 = min_t(int, clip.x2, rects[i].x2);
+               int y2 = min_t(int, clip.y2, rects[i].y2);
+
+               if (x1 >= x2)
+                       continue;
+               if (y1 >= y2)
+                       continue;
+
+               out_rects[k].left   = x1;
+               out_rects[k].top    = y1;
+               out_rects[k].right  = x2;
+               out_rects[k].bottom = y2;
+               k++;
+       }
+
+       *out_num = k;
+}
+
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
        if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_num;
+       void *virtual;
+       bool dummy;
+       int ret;
+
+       kmap_offset = 0;
+       kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("reserve failed\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0))
+               goto err_unreserve;
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+                                     hotspotX, hotspotY);
+
+       ttm_bo_kunmap(&map);
+err_unreserve:
+       ttm_bo_unreserve(&dmabuf->base);
+
+       return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
 {
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                return -EINVAL;
 
        if (handle) {
-               ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                                    handle, &surface);
-               if (!ret) {
-                       if (!surface->snooper.image) {
-                               DRM_ERROR("surface not suitable for cursor\n");
-                               vmw_surface_unreference(&surface);
-                               return -EINVAL;
-                       }
-               } else {
-                       ret = vmw_user_dmabuf_lookup(tfile,
-                                                    handle, &dmabuf);
-                       if (ret) {
-                               DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-                               return -EINVAL;
-                       }
+               ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                            handle, &surface, &dmabuf);
+               if (ret) {
+                       DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+                       return -EINVAL;
                }
        }
 
+       /* need to do this before taking down old image */
+       if (surface && !surface->snooper.image) {
+               DRM_ERROR("surface not suitable for cursor\n");
+               vmw_surface_unreference(&surface);
+               return -EINVAL;
+       }
+
        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
-               struct ttm_bo_kmap_obj map;
-               unsigned long kmap_offset;
-               unsigned long kmap_num;
-               void *virtual;
-               bool dummy;
-
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;
 
-               kmap_offset = 0;
-               kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-               ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("reserve failed\n");
-                       return -EINVAL;
-               }
-
-               ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-               if (unlikely(ret != 0))
-                       goto err_unreserve;
-
-               virtual = ttm_kmap_obj_virtual(&map, &dummy);
-               vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-                                       du->hotspot_x, du->hotspot_y);
-
-               ttm_bo_kunmap(&map);
-err_unreserve:
-               ttm_bo_unreserve(&dmabuf->base);
-
+               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+                                              du->hotspot_x, du->hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                                struct drm_clip_rect *clips,
                                unsigned num_clips, int inc)
 {
-       struct drm_clip_rect *clips_ptr;
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *clips_ptr;
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        } *cmd;
        SVGASignedRect *blits;
 
-
        num_units = 0;
        list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                            head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kzalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Temporary fifo memory alloc failed.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
        }
 
+       /* setup blits pointer */
+       blits = (SVGASignedRect *)&cmd[1];
+
+       /* initial clip region */
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        cmd->body.srcRect.bottom = bottom;
 
        clips_ptr = clips;
-       blits = (SVGASignedRect *)&cmd[1];
        for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-               blits[i].left   = clips_ptr->x1 - left;
-               blits[i].right  = clips_ptr->x2 - left;
-               blits[i].top    = clips_ptr->y1 - top;
-               blits[i].bottom = clips_ptr->y2 - top;
+               tmp[i].x1 = clips_ptr->x1 - left;
+               tmp[i].x2 = clips_ptr->x2 - left;
+               tmp[i].y1 = clips_ptr->y1 - top;
+               tmp[i].y2 = clips_ptr->y2 - top;
        }
 
        /* do per unit writing, reuse fifo for each */
        for (i = 0; i < num_units; i++) {
                struct vmw_display_unit *unit = units[i];
-               int clip_x1 = left - unit->crtc.x;
-               int clip_y1 = top - unit->crtc.y;
-               int clip_x2 = right - unit->crtc.x;
-               int clip_y2 = bottom - unit->crtc.y;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left - unit->crtc.x;
+               clip.y1 = top - unit->crtc.y;
+               clip.x2 = right - unit->crtc.x;
+               clip.y2 = bottom - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled
+                * the src and dest rects needs to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                        break;
        }
 
+
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
         * Sanity checks.
         */
 
+       /* Surface must be marked as a scanout. */
+       if (unlikely(!surface->scanout))
+               return -EINVAL;
+
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                        int clip_y1 = clips_ptr->y1 - unit->crtc.y;
                        int clip_x2 = clips_ptr->x2 - unit->crtc.x;
                        int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+                       int move_x, move_y;
 
                        /* skip any crtcs that misses the clip region */
                        if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                            clip_x2 <= 0 || clip_y2 <= 0)
                                continue;
 
+                       /* clip size to crtc size */
+                       clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+                       clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+                       /* translate both src and dest to bring clip into screen */
+                       move_x = min_t(int, clip_x1, 0);
+                       move_y = min_t(int, clip_y1, 0);
+
+                       /* actual translate done here */
                        blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
                        blits[hit_num].body.destScreenId = unit->unit;
-                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-                       blits[hit_num].body.destRect.left = clip_x1;
-                       blits[hit_num].body.destRect.top = clip_y1;
+                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+                       blits[hit_num].body.destRect.left = clip_x1 - move_x;
+                       blits[hit_num].body.destRect.top = clip_y1 - move_y;
                        blits[hit_num].body.destRect.right = clip_x2;
                        blits[hit_num].body.destRect.bottom = clip_y2;
                        hit_num++;
@@ -1003,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
-       u64 required_size;
        int ret;
 
        /**
@@ -1012,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * requested framebuffer.
         */
 
-       required_size = mode_cmd->pitch * mode_cmd->height;
-       if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+       if (!vmw_kms_validate_mode_vram(dev_priv,
+                                       mode_cmd->pitch,
+                                       mode_cmd->height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
        }
 
-       /**
-        * End conditioned code.
-        */
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                            mode_cmd->handle, &surface);
+       /* returns either a dmabuf or surface */
+       ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                    mode_cmd->handle,
+                                    &surface, &bo);
        if (ret)
-               goto try_dmabuf;
-
-       if (!surface->scanout)
-               goto err_not_scanout;
-
-       ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-                                             &vfb, mode_cmd);
-
-       /* vmw_user_surface_lookup takes one ref so does new_fb */
-       vmw_surface_unreference(&surface);
-
-       if (ret) {
-               DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-               ttm_base_object_unref(&user_obj);
-               return ERR_PTR(ret);
-       } else
-               vfb->user_obj = user_obj;
-       return &vfb->base;
-
-try_dmabuf:
-       DRM_INFO("%s: trying buffer\n", __func__);
-
-       ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
-       if (ret) {
-               DRM_ERROR("failed to find buffer: %i\n", ret);
-               return ERR_PTR(-ENOENT);
-       }
-
-       ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                            mode_cmd);
+               goto err_out;
+
+       /* Create the new framebuffer depending one what we got back */
+       if (bo)
+               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+                                                    mode_cmd);
+       else if (surface)
+               ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+                                                     surface, &vfb, mode_cmd);
+       else
+               BUG();
 
-       /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-       vmw_dmabuf_unreference(&bo);
+err_out:
+       /* vmw_user_lookup_handle takes one ref so does new_fb */
+       if (bo)
+               vmw_dmabuf_unreference(&bo);
+       if (surface)
+               vmw_surface_unreference(&surface);
 
        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
                vfb->user_obj = user_obj;
 
        return &vfb->base;
-
-err_not_scanout:
-       DRM_ERROR("surface not marked as scanout\n");
-       /* vmw_user_surface_lookup takes one ref */
-       vmw_surface_unreference(&surface);
-       ttm_base_object_unref(&user_obj);
-
-       return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
                    uint32_t num_clips)
 {
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, k, num_units;
        int ret = 0; /* silence warning */
+       int left, right, top, bottom;
 
        struct {
                SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        BUG_ON(surface == NULL);
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kmalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
+       }
+
+       left = clips->x;
+       right = clips->x + clips->w;
+       top = clips->y;
+       bottom = clips->y + clips->h;
+
+       for (i = 1; i < num_clips; i++) {
+               left = min_t(int, left, (int)clips[i].x);
+               right = max_t(int, right, (int)clips[i].x + clips[i].w);
+               top = min_t(int, top, (int)clips[i].y);
+               bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
        }
 
        /* only need to do this once */
        memset(cmd, 0, fifo_size);
        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-       cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-       cmd->body.srcRect.left = 0;
-       cmd->body.srcRect.right = surface->sizes[0].width;
-       cmd->body.srcRect.top = 0;
-       cmd->body.srcRect.bottom = surface->sizes[0].height;
 
        blits = (SVGASignedRect *)&cmd[1];
+
+       cmd->body.srcRect.left = left;
+       cmd->body.srcRect.right = right;
+       cmd->body.srcRect.top = top;
+       cmd->body.srcRect.bottom = bottom;
+
        for (i = 0; i < num_clips; i++) {
-               blits[i].left   = clips[i].x;
-               blits[i].right  = clips[i].x + clips[i].w;
-               blits[i].top    = clips[i].y;
-               blits[i].bottom = clips[i].y + clips[i].h;
+               tmp[i].x1 = clips[i].x - left;
+               tmp[i].x2 = clips[i].x + clips[i].w - left;
+               tmp[i].y1 = clips[i].y - top;
+               tmp[i].y2 = clips[i].y + clips[i].h - top;
        }
 
        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
-               int clip_x1 = destX - unit->crtc.x;
-               int clip_y1 = destY - unit->crtc.y;
-               int clip_x2 = clip_x1 + surface->sizes[0].width;
-               int clip_y2 = clip_y1 + surface->sizes[0].height;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left + destX - unit->crtc.x;
+               clip.y1 = top + destY - unit->crtc.y;
+               clip.x2 = right + destX - unit->crtc.x;
+               clip.y2 = bottom + destY - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled
+                * the src and dest rects needs to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = sid;
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        }
 
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
-       rects = kzalloc(rects_size, GFP_KERNEL);
+       rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+                       GFP_KERNEL);
        if (unlikely(!rects)) {
                ret = -ENOMEM;
                goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        for (i = 0; i < arg->num_outputs; ++i) {
-               if (rects->x < 0 ||
-                   rects->y < 0 ||
-                   rects->x + rects->w > mode_config->max_width ||
-                   rects->y + rects->h > mode_config->max_height) {
+               if (rects[i].x < 0 ||
+                   rects[i].y < 0 ||
+                   rects[i].x + rects[i].w > mode_config->max_width ||
+                   rects[i].y + rects[i].h > mode_config->max_height) {
                        DRM_ERROR("Invalid GUI layout.\n");
                        ret = -EINVAL;
                        goto out_free;
index af8e6e5bd964a2dca37f7ba17165256d44a2f8bf..e1cb8556355fc625946783c30286ba3fb9388762 100644 (file)
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
index 90c5e39284913353f68c6e4611d59391d557228c..8f8dbd43c33d0116a43fb1206d93a4512aca2229 100644 (file)
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
        struct vmw_legacy_display *lds = dev_priv->ldu_priv;
        struct vmw_legacy_display_unit *entry;
+       struct vmw_display_unit *du = NULL;
        struct drm_framebuffer *fb = NULL;
        struct drm_crtc *crtc = NULL;
-       int i = 0;
+       int i = 0, ret;
 
        /* If there is no display topology the host just assumes
         * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
        lds->last_num_active = lds->num_active;
 
+
+       /* Find the first du with a cursor. */
+       list_for_each_entry(entry, &lds->active, active) {
+               du = &entry->base;
+
+               if (!du->cursor_dmabuf)
+                       continue;
+
+               ret = vmw_cursor_update_dmabuf(dev_priv,
+                                              du->cursor_dmabuf,
+                                              64, 64,
+                                              du->hotspot_x,
+                                              du->hotspot_y);
+               if (ret == 0)
+                       break;
+
+               DRM_ERROR("Could not update cursor image\n");
+       }
+
        return 0;
 }
 
index 86c5e4cceb31ef83beb568e3b912de62f73a2648..1c7f09e268190a5466fdac896e2d617382c8f2d1 100644 (file)
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
                write_unlock(lock);
 }
 
+/**
+ * Helper function that looks either a surface or dmabuf.
+ *
+ * The pointer this pointed at by out_surf and out_buf needs to be null.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
+{
+       int ret;
+
+       BUG_ON(*out_surf || *out_buf);
+
+       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+       if (!ret)
+               return 0;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
index c72f1c0b5e637951a450b21378f2edfeb8e92725..111d956d8e7d0d919d088405ddeaf3b341c87092 100644 (file)
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
        while (new_bus) {
                new_bridge = new_bus->self;
 
-               if (new_bridge) {
-                       /* go through list of devices already registered */
-                       list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
-                               bus = same_bridge_vgadev->pdev->bus;
-                               bridge = bus->self;
-
-                               /* see if the share a bridge with this device */
-                               if (new_bridge == bridge) {
-                                       /* if their direct parent bridge is the same
-                                          as any bridge of this device then it can't be used
-                                          for that device */
-                                       same_bridge_vgadev->bridge_has_one_vga = false;
-                               }
+               /* go through list of devices already registered */
+               list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
+                       bus = same_bridge_vgadev->pdev->bus;
+                       bridge = bus->self;
+
+                       /* see if the share a bridge with this device */
+                       if (new_bridge == bridge) {
+                               /* if their direct parent bridge is the same
+                                  as any bridge of this device then it can't be used
+                                  for that device */
+                               same_bridge_vgadev->bridge_has_one_vga = false;
+                       }
 
-                               /* now iterate the previous devices bridge hierarchy */
-                               /* if the new devices parent bridge is in the other devices
-                                  hierarchy then we can't use it to control this device */
-                               while (bus) {
-                                       bridge = bus->self;
-                                       if (bridge) {
-                                               if (bridge == vgadev->pdev->bus->self)
-                                                       vgadev->bridge_has_one_vga = false;
-                                       }
-                                       bus = bus->parent;
+                       /* now iterate the previous devices bridge hierarchy */
+                       /* if the new devices parent bridge is in the other devices
+                          hierarchy then we can't use it to control this device */
+                       while (bus) {
+                               bridge = bus->self;
+                               if (bridge) {
+                                       if (bridge == vgadev->pdev->bus->self)
+                                               vgadev->bridge_has_one_vga = false;
                                }
+                               bus = bus->parent;
                        }
                }
                new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                                uc = &priv->cards[i];
                }
 
-               if (!uc)
-                       return -EINVAL;
+               if (!uc) {
+                       ret_val = -EINVAL;
+                       goto done;
+               }
 
-               if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
-                       return -EINVAL;
+               if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
+                       ret_val = -EINVAL;
+                       goto done;
+               }
 
-               if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
-                       return -EINVAL;
+               if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
+                       ret_val = -EINVAL;
+                       goto done;
+               }
 
                vga_put(pdev, io_state);
 
index 848a56c0279c8ac61687340c732521094428e541..af353842f75feaceadeedcc547eeb880519f86df 100644 (file)
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
index 06ce996b8b6504f65c9216c3173a38cf5a51cb34..4a441a6f996748a923204fbcb53c4850eb3af061 100644 (file)
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
index 9ec854ae118b91c5fea647d61bab7f29117a10a5..91be41f6080947936bdd355f25500397ecfd1dfe 100644 (file)
@@ -315,7 +315,7 @@ config SENSORS_DS1621
 
 config SENSORS_EXYNOS4_TMU
        tristate "Temperature sensor on Samsung EXYNOS4"
-       depends on EXYNOS4_DEV_TMU
+       depends on ARCH_EXYNOS4
        help
          If you say yes here you get support for TMU (Thermal Managment
          Unit) on SAMSUNG EXYNOS4 series of SoC.
index 318e38e853764eeeea220bfb519079cf20f687bc..5d760f3d21c2d61ee85227ab7107bf4c38e011f9 100644 (file)
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
 static struct spi_driver ad7314_driver = {
        .driver = {
                .name = "ad7314",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
        .probe = ad7314_probe,
index 52319340e182da8f189eae518d80bd3674d26425..04450f8bf5da28e4dff56e70ab86e294920c2cf2 100644 (file)
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
 static struct spi_driver ads7871_driver = {
        .driver = {
                .name = DEVICE_NAME,
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
 
index faa0884f61f67bc49e72e93c075160d46078cba4..f2359a0093bd65e52c6985e6a06eef3a0cec8d37 100644 (file)
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
        .resume = exynos4_tmu_resume,
 };
 
-static int __init exynos4_tmu_driver_init(void)
-{
-       return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
-       platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
 
 MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
index 89aa9fb743af07260c8deadbc8956eb4c1ada2de..9ba38f318ffb458247dd66b58b77c3c03e203727 100644 (file)
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
        },
 };
 
-static int __init gpio_fan_init(void)
-{
-       return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
-       platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
 
 MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
 MODULE_DESCRIPTION("GPIO FAN driver");
index fea292d43407cecba6e75b70021617f871d6b1b8..5253d23361d91a4e93eefb00021bcd3ae4f9915f 100644 (file)
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct completion *completion = &hwmon->read_completion;
-       unsigned long t;
+       long t;
        unsigned long val;
        int ret;
 
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
        .probe  = jz4740_hwmon_probe,
        .remove = __devexit_p(jz4740_hwmon_remove),
        .driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
        },
 };
 
-static int __init jz4740_hwmon_init(void)
-{
-       return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
-       platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
 
 MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
index eab11615dced6b54e71996bfd4df60c159bbe570..9b382ec2c3bd4de978d98a6d9f24f797684fe534 100644 (file)
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
        .id_table = ntc_thermistor_id,
 };
 
-static int __init ntc_thermistor_init(void)
-{
-       return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
-       platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
 
 MODULE_DESCRIPTION("NTC Thermistor Driver");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
index b39f52e2752a7bca54a1bb25c7083382b01aa52e..f6c26d19f521aaa98dd2b0596d67ab27028b8d15 100644 (file)
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
        .remove         = __devexit_p(s3c_hwmon_remove),
 };
 
-static int __init s3c_hwmon_init(void)
-{
-       return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
-       platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
 
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("S3C ADC HWMon driver");
index e3b5c6039c2541e67e63066e4311f2dc6c1021b6..79b6dabe3161461a3f1ccdf277588860f7863df9 100644 (file)
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
        .remove         = sch5627_remove,
 };
 
-static int __init sch5627_init(void)
-{
-       return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
-       platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
 MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
index 244407aa79fcbe05dc05b10ae7d88c0327db6d29..9d5236fb09b421e6a1e78cd5b190eaa9d19f6a2d 100644 (file)
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
        .remove         = sch5636_remove,
 };
 
-static int __init sch5636_init(void)
-{
-       return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
-       platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
 
 MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
 MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
index 57240740b161d190329b5ca3b7dfaf0d049ef02e..0018c7dd0097de5045f646d98e715713ea7edba4 100644 (file)
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
                   },
 };
 
-static int __init twl4030_madc_hwmon_init(void)
-{
-       return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
-       platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
 
 MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
 MODULE_LICENSE("GPL");
index 3cd07bf42dca4203c93712823fdf3dd2fb3335fa..b9a87e89bab4471ce29aba1509f14bd908603b3e 100644 (file)
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
        .remove         = __devexit_p(env_remove),
 };
 
-static int __init env_init(void)
-{
-       return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
-       platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
index 97b1f834a4714b540fd95352271bb253ef130c9c..9b598ed26020563978f1142840c4b6a5f0ecad77 100644 (file)
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
        },
 };
 
-static int __init wm831x_hwmon_init(void)
-{
-       return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
-       platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("WM831x Hardware Monitoring");
index 13290595ca8660f25ebbd0e0c350795dc3d6ccc8..3ff67edbdc44af02a7d0553dd2d74530cc00ca6f 100644 (file)
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
        },
 };
 
-static int __init wm8350_hwmon_init(void)
-{
-       return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
-       platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
index 85584a547c25201a1375bf92b074af571d37be0e..525c7345fa0b904242a0ef10a5d9502ab5e4baf1 100644 (file)
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 
        if (flags & I2C_M_TEN) {
                /* a ten bit address */
-               addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+               addr = 0xf0 | ((msg->addr >> 7) & 0x06);
                bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
                /* try extended address code...*/
                ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        return -ENXIO;
                }
                /* the remaining 8 bit address */
-               ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+               ret = i2c_outb(i2c_adap, msg->addr & 0xff);
                if ((ret != 1) && !nak_ok) {
                        /* the chip did not ack / xmission error occurred */
                        dev_err(&i2c_adap->dev, "died at 2nd address code\n");
index 8cebef49aeaca2b81a94b862387fb0be46dcdaa5..18936ac9d51cd14af2221546fd062c3494ea18ec 100644 (file)
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.parent = &pdev->dev;
 
+               pch_i2c_init(&adap_info->pch_data[i]);
                ret = i2c_add_adapter(pch_adap);
                if (ret) {
                        pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-                       goto err_i2c_add_adapter;
+                       goto err_add_adapter;
                }
-
-               pch_i2c_init(&adap_info->pch_data[i]);
-       }
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_i2c_add_adapter;
        }
 
        pci_set_drvdata(pdev, adap_info);
        pch_pci_dbg(pdev, "returns %d.\n", ret);
        return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
        for (j = 0; j < i; j++)
                i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+       free_irq(pdev->irq, adap_info);
+err_request_irq:
        pci_iounmap(pdev, base_addr);
 err_pci_iomap:
        pci_release_regions(pdev);
index 835e47b39bc264535c567669d12a81784a8c90c7..03b61577888748a4d9a61cfe46b413eaaa219737 100644 (file)
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
        i2c->adap.algo_data = i2c;
        i2c->adap.dev.parent = &pdev->dev;
 
-       mfp_set_groupg(&pdev->dev);
+       mfp_set_groupg(&pdev->dev, NULL);
 
        clk_get_rate(i2c->clk);
 
index a43d0023446a873c18586f0de99ac733ee1e6cfc..fa23faa20f0e34435881e24abc6d5e4c9545a259 100644 (file)
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
                 * size. This is to ensure that we can handle the status on int
                 * call back latencies.
                 */
-               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-                       dev->fifo_size = 0;
+
+               dev->fifo_size = (dev->fifo_size / 2);
+
+               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
                        dev->b_hw = 0; /* Disable hardware fixes */
-               } else {
-                       dev->fifo_size = (dev->fifo_size / 2);
+               else
                        dev->b_hw = 1; /* Enable hardware fixes */
-               }
+
                /* calculate wakeup latency constraint for MPU */
                if (dev->set_mpu_wkup_lat != NULL)
                        dev->latency = (1000000 * dev->fifo_size) /
index 2754cef86a06f882d337a28ef8cc320f1da4727b..4c17180816853a339ddb7a3dcb22b47425786173 100644 (file)
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 
        /* first, try busy waiting briefly */
        do {
+               cpu_relax();
                iicstat = readl(i2c->regs + S3C2410_IICSTAT);
        } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-       return -EINVAL;
+       return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
index 131079a3e2923a1feaa05248a236d8ee27890c4a..1e5606185b4f581939d6da4334b36747e90c35ba 100644 (file)
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
        client->dev.type = &i2c_client_type;
        client->dev.of_node = info->of_node;
 
+       /* For 10-bit clients, add an arbitrary offset to avoid collisions */
        dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
-                    client->addr);
+                    client->addr | ((client->flags & I2C_CLIENT_TEN)
+                                    ? 0xa000 : 0));
        status = device_register(&client->dev);
        if (status)
                goto out_err;
index c90ce50b619f7b483b85483a24457a8cab73f3c2..57a45ce84b2d42f893cf45fb6e876bc1904bcfde 100644 (file)
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
        return 0;
 }
 
-int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
                         void *data)
 {
        struct device *dev = data;
index 67cbcfa351225b4f7c60dce041562a411d76430e..847553fd8b963beca810aadb548d2f4f96db666e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
  *  Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
- *  Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
+ *  Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
  *
  * CYPRESS CY82C693 chipset IDE controller
  *
@@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
        u8 time_16, time_8;
 
        /* select primary or secondary channel */
-       if (hwif->index > 0) {  /* drive is on the secondary channel */
+       if (drive->dn > 1) {  /* drive is on the secondary channel */
                dev = pci_get_slot(dev->bus, dev->devfn+1);
                if (!dev) {
                        printk(KERN_ERR "%s: tune_drive: "
@@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
                pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
                pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
        }
-       if (hwif->index > 0)
+       if (drive->dn > 1)
                pci_dev_put(dev);
 }
 
index 4a697a238e280e2ce14c627d0d1f4bf053b86b45..8716066a2f2b79c1ddeb1236a9b2091d7bd62f63 100644 (file)
@@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
        if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
                d.init_dma = icside_dma_init;
                d.port_ops = &icside_v6_port_ops;
+       } else
                d.dma_ops = NULL;
-       }
 
        ret = ide_host_register(host, &d, hws);
        if (ret)
index b59d04c720517d722633f9feef63a42505f86625..1892e81fb00f615b80c3dfaa8f10a643037a5eb6 100644 (file)
@@ -331,7 +331,7 @@ static const struct ide_port_ops ich_port_ops = {
                .udma_mask      = udma,                 \
        }
 
-#define DECLARE_ICH_DEV(udma) \
+#define DECLARE_ICH_DEV(mwdma, udma) \
        { \
                .name           = DRV_NAME, \
                .init_chipset   = init_chipset_ich, \
@@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = {
                .port_ops       = &ich_port_ops, \
                .pio_mask       = ATA_PIO4, \
                .swdma_mask     = ATA_SWDMA2_ONLY, \
-               .mwdma_mask     = ATA_MWDMA12_ONLY, \
+               .mwdma_mask     = mwdma, \
                .udma_mask      = udma, \
        }
 
@@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
        /* 2: PIIX4 */
        DECLARE_PIIX_DEV(ATA_UDMA2),
        /* 3: ICH0 */
-       DECLARE_ICH_DEV(ATA_UDMA2),
+       DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
        /* 4: ICH */
-       DECLARE_ICH_DEV(ATA_UDMA4),
+       DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
        /* 5: PIIX4 */
        DECLARE_PIIX_DEV(ATA_UDMA4),
-       /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
-       DECLARE_ICH_DEV(ATA_UDMA5),
+       /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
+       DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
+       /* 7: ICH7/7-R, no MWDMA1 */
+       DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
 };
 
 /**
@@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
 #endif
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2),      6 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19),    6 },
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21),    6 },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21),    7 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1),  6 },
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18),    6 },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18),    7 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6),     6 },
        { 0, },
 };
index e53a1b78378b02c414c0c292c954974c763d880d..281c91426345946279412880bc804d01d287ec4c 100644 (file)
@@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
 
+#ifdef CONFIG_PM
+static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+       /*
+        * We must not disable or powerdown the device.
+        * APM bios refuses to suspend if IDE is not accessible.
+        */
+       pci_save_state(dev);
+       return 0;
+}
+#else
+#define triflex_ide_pci_suspend NULL
+#endif
+
 static struct pci_driver triflex_pci_driver = {
        .name           = "TRIFLEX_IDE",
        .id_table       = triflex_pci_tbl,
        .probe          = triflex_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
+       .suspend        = triflex_ide_pci_suspend,
        .resume         = ide_pci_resume,
 };
 
index eb0e2ccc79ae6c3098cdb065511b047218b0c6a0..73d453159408d62725e81317001889071211f094 100644 (file)
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
 {
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->features           = NETIF_F_NO_CSUM;
+       dev->features           = NETIF_F_HW_CSUM;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = 127;
        dev->tx_queue_len       = 10;
index 691276bafd7812c168c4dadbe1aa315bf43bad80..1612cfd50f399072f4becbc3d985d4435ceb7910 100644 (file)
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
        mutex_unlock(&lock);
 }
 
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+       struct neighbour *n;
+       int ret;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       if (!n || !(n->nud_state & NUD_VALID)) {
+               if (n)
+                       neigh_event_send(n, NULL);
+               ret = -ENODATA;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, n->ha);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static int addr4_resolve(struct sockaddr_in *src_in,
                         struct sockaddr_in *dst_in,
                         struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        __be32 src_ip = src_in->sin_addr.s_addr;
        __be32 dst_ip = dst_in->sin_addr.s_addr;
        struct rtable *rt;
-       struct neighbour *neigh;
        struct flowi4 fl4;
        int ret;
 
@@ -214,18 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
                goto put;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-               ret = -ENODATA;
-               if (neigh)
-                       goto release;
-               goto put;
-       }
-
-       ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
-       neigh_release(neigh);
+       ret = dst_fetch_ha(&rt->dst, addr);
 put:
        ip_rt_put(rt);
 out:
@@ -238,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                         struct rdma_dev_addr *addr)
 {
        struct flowi6 fl6;
-       struct neighbour *neigh;
        struct dst_entry *dst;
        int ret;
 
        memset(&fl6, 0, sizeof fl6);
-       ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
-       ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+       fl6.daddr = dst_in->sin6_addr;
+       fl6.saddr = src_in->sin6_addr;
        fl6.flowi6_oif = addr->bound_dev_if;
 
        dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -258,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                        goto put;
 
                src_in->sin6_family = AF_INET6;
-               ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+               src_in->sin6_addr = fl6.saddr;
        }
 
        if (dst->dev->flags & IFF_LOOPBACK) {
@@ -274,15 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                goto put;
        }
 
-       neigh = dst_get_neighbour(dst);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               if (neigh)
-                       neigh_event_send(neigh, NULL);
-               ret = -ENODATA;
-               goto put;
-       }
-
-       ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+       ret = dst_fetch_ha(dst, addr);
 put:
        dst_release(dst);
        return ret;
index 75ff821c0af07caaef3a0e48c8503489838bd75d..236a88c1ca87cf934d17bc318b44ea11e7b0a182 100644 (file)
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
        if (cma_zero_addr(src)) {
                dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
                if ((src->sa_family = dst->sa_family) == AF_INET) {
-                       ((struct sockaddr_in *) src)->sin_addr.s_addr =
-                               ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+                       ((struct sockaddr_in *)src)->sin_addr =
+                               ((struct sockaddr_in *)dst)->sin_addr;
                } else {
-                       ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
-                                      &((struct sockaddr_in6 *) dst)->sin6_addr);
+                       ((struct sockaddr_in6 *)src)->sin6_addr =
+                               ((struct sockaddr_in6 *)dst)->sin6_addr;
                }
        }
 
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
        req.private_data_len = sizeof(struct cma_hdr) +
                               conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!req.private_data)
                return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;
index de6d0774e60990f644e39ca815159f68b5a065d1..740dcc065cf2f46a3f33c00db555f00b48736139 100644 (file)
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
@@ -1375,8 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                goto reject;
        }
        dst = &rt->dst;
-       neigh = dst_get_neighbour(dst);
-       l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+       l2t = t3_l2t_get(tdev, dst, NULL);
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1887,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
-       struct neighbour *neigh;
        struct iwch_ep *ep;
        struct rtable *rt;
        int err = 0;
@@ -1945,11 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                goto fail3;
        }
        ep->dst = &rt->dst;
-
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+       ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index b36cdac9c558a35aa78f2061e9f7435dc1504754..0668bb3472d096ba1c0889bbc999b2076932ffe7 100644 (file)
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
                     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
        mpa->private_data_size = htons(ep->plen);
        mpa->revision = mpa_rev_to_use;
-       if (mpa_rev_to_use == 1)
+       if (mpa_rev_to_use == 1) {
                ep->tried_with_mpa_v1 = 1;
+               ep->retry_with_mpa_v1 = 0;
+       }
 
        if (mpa_rev_to_use == 2) {
                mpa->private_data_size +=
@@ -1554,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
        return;
 }
 
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+                    struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+       struct neighbour *n;
+       int err, step;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       err = -ENODEV;
+       if (!n)
+               goto out;
+       err = -ENOMEM;
+       if (n->dev->flags & IFF_LOOPBACK) {
+               struct net_device *pdev;
+
+               pdev = ip_dev_find(&init_net, peer_ip);
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, pdev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = pdev->mtu;
+               ep->tx_chan = cxgb4_port_chan(pdev);
+               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(pdev) * step;
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->ctrlq_idx = cxgb4_port_idx(pdev);
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(pdev) * step];
+               dev_put(pdev);
+       } else {
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, n->dev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = dst_mtu(ep->dst);
+               ep->tx_chan = cxgb4_port_chan(n->dev);
+               ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+               ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(n->dev) * step];
+
+               if (clear_mpa_v1) {
+                       ep->retry_with_mpa_v1 = 0;
+                       ep->tried_with_mpa_v1 = 0;
+               }
+       }
+       err = 0;
+out:
+       rcu_read_unlock();
+
+       return err;
+}
+
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep, *parent_ep;
@@ -1561,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
-       struct l2t_entry *l2t;
        struct rtable *rt;
        __be32 local_ip, peer_ip;
        __be16 local_port, peer_port;
-       struct net_device *pdev;
-       u32 tx_chan, smac_idx;
-       u16 rss_qid;
-       u32 mtu;
-       int step;
-       int txq_idx, ctrlq_idx;
+       int err;
 
        parent_ep = lookup_stid(t, stid);
        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1594,47 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
        dst = &rt->dst;
-       neigh = dst_get_neighbour(dst);
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               pdev = ip_dev_find(&init_net, peer_ip);
-               BUG_ON(!pdev);
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
-               mtu = pdev->mtu;
-               tx_chan = cxgb4_port_chan(pdev);
-               smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(pdev) * step;
-               ctrlq_idx = cxgb4_port_idx(pdev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
-               mtu = dst_mtu(dst);
-               tx_chan = cxgb4_port_chan(neigh->dev);
-               smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[
-                         cxgb4_port_idx(neigh->dev) * step];
-       }
-       if (!l2t) {
-               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+       if (!child_ep) {
+               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }
 
-       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
-       if (!child_ep) {
-               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+       err = import_ep(child_ep, peer_ip, dst, dev, false);
+       if (err) {
+               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
-               cxgb4_l2t_release(l2t);
                dst_release(dst);
+               kfree(child_ep);
                goto reject;
        }
+
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
@@ -1647,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
-       child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
-       child_ep->tx_chan = tx_chan;
-       child_ep->smac_idx = smac_idx;
-       child_ep->rss_qid = rss_qid;
-       child_ep->mtu = mtu;
-       child_ep->txq_idx = txq_idx;
-       child_ep->ctrlq_idx = ctrlq_idx;
 
        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
-            tx_chan, smac_idx, rss_qid);
+            child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1788,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
 
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
-       int err = 0;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);
@@ -1820,45 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        }
        ep->dst = &rt->dst;
 
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  ep->com.cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(neigh->dev) * step];
-       }
-       if (!ep->l2t) {
+       err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, false);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
@@ -2234,13 +2222,10 @@ err:
 
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
-       int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
@@ -2301,47 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(neigh->dev) * step];
-               ep->retry_with_mpa_v1 = 0;
-               ep->tried_with_mpa_v1 = 0;
-       }
-       if (!ep->l2t) {
+       err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, true);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
index f35a935267e77e7a58c3be85437cbcb1a1f35cb7..0f1607c8325a5bc8a03e3a5d5471d39ed5741e8b 100644 (file)
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
-                   (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+                   (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
index f36da994a85abec20aa1b1eab19660ce00367e6e..95c94d8f02543ed89631f424a3c39c31c74cfa13 100644 (file)
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 
        err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
                           in_modifier, op_modifier,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                return IB_MAD_RESULT_FAILURE;
 
        err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_WRAPPED);
        if (err)
                err = IB_MAD_RESULT_FAILURE;
        else {
index 77f3dbc0aaa1629783bfadb84d0ac5ad754e8207..7b445df6a667be9a595ae1f8ed9449e6419ce16a 100644 (file)
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
        struct mlx4_dev *dev = to_mdev(device)->dev;
 
-       return dev->caps.port_mask & (1 << (port_num - 1)) ?
+       return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 }
 
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        memset(mailbox->buf, 0, 256);
        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
-                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
        }
 
        err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
        memcpy(gids, gw->gids, sizeof gw->gids);
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
        if (err)
                printk(KERN_WARNING "set port command failed\n");
        else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        printk_once(KERN_INFO "%s", mlx4_ib_version);
 
+       if (mlx4_is_mfunc(dev)) {
+               printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+               return NULL;
+       }
+
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
 
@@ -1244,7 +1250,8 @@ err_reg:
 
 err_counter:
        for (; i; --i)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+               if (ibdev->counters[i - 1] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
        iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        }
        iounmap(ibdev->uar_map);
        for (p = 0; p < ibdev->num_ports; ++p)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+               if (ibdev->counters[p] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);
 
index dfce9ea98a39b3f0f68e6d152d438554ebe8ee84..b1e6cae5f47ed20b21e6f0da31bc9b89129a02fe 100644 (file)
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+       rcu_read_lock();
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
                                            neigh->ha, ETH_ALEN)) {
                                        /* Mac address same as in nes_arp_table */
-                                       neigh_release(neigh);
                                        ip_rt_put(rt);
                                        return rc;
                                }
@@ -1373,13 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                             dst_ip, NES_ARP_ADD);
                        rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
                                           NES_ARP_RESOLVE);
+               } else {
+                       neigh_event_send(neigh, NULL);
                }
-               neigh_release(neigh);
        }
-
-       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+       rcu_read_unlock();
        ip_rt_put(rt);
        return rc;
 }
index c00d2f3f8966b5084ec5b5875845884b6b294789..4b3fa711a2470edc4485f286a3ae4ced7adec74b 100644 (file)
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
        .set_pauseparam = nes_netdev_set_pauseparam,
 };
 
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
 {
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
        spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 }
 
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
        struct nes_device *nesdev = nesvnic->nesdev;
index 574600ef5b428e4766d8cd431632a0e131c176a9..a7403248d83dee7c14ce9578618872c745fdffcd 100644 (file)
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
-       dd->freectxts++;
+       dd->freectxts--;
        ret = 0;
        goto bail;
 
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
                if (dd->pageshadow)
                        unlock_expected_tids(rcd);
                qib_stats.sps_ctxts--;
-               dd->freectxts--;
+               dd->freectxts++;
        }
 
        mutex_unlock(&qib_mutex);
index 5bd2162b95dcb8c93051ce7e496b628222764184..1d5895941e193e35e5ff88804fb00a9c29c8a5fa 100644 (file)
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
                SYM_LSB(IBCCtrlA_0, MaxPktLen);
        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
 
-       /* initially come up waiting for TS1, without sending anything. */
-       val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
-               QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
-       ppd->cpspec->ibcctrl_a = val;
        /*
         * Reset the PCS interface to the serdes (and also ibc, which is still
         * in reset from above).  Writes new value of ibcctrl_a as last step.
         */
        qib_7322_mini_pcs_reset(ppd);
-       qib_write_kreg(dd, kr_scratch, 0ULL);
-       /* clear the linkinit cmds */
-       ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
 
        if (!ppd->cpspec->ibcctrl_b) {
                unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
        set_vls(ppd);
 
+       /* initially come up DISABLED, without sending anything. */
+       val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+                                       QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+       qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+       qib_write_kreg(dd, kr_scratch, 0ULL);
+       /* clear the linkinit cmds */
+       ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
        /* be paranoid against later code motion, etc. */
        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
                           off */
                        if (ppd->dd->flags & QIB_HAS_QSFP) {
                                qd->t_insert = get_jiffies_64();
-                               schedule_work(&qd->work);
+                               queue_work(ib_wq, &qd->work);
                        }
                        spin_lock_irqsave(&ppd->sdma_lock, flags);
                        if (__qib_sdma_running(ppd))
index e06c4ed383f14598674ce8a0847c9780a6231c46..fa71b1e666c5414fbba2357fe531e75cabc7986c 100644 (file)
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       /* Spec says module can take up to two seconds! */
-       mask = QSFP_GPIO_MOD_PRS_N;
-       if (qd->ppd->hw_pidx)
-               mask <<= QSFP_GPIO_PORT2_SHIFT;
-
-       /* Do not try to wait here. Better to let event handle it */
-       if (!qib_qsfp_mod_present(qd->ppd))
-               goto bail;
-       /* We see a module, but it may be unwise to look yet. Just schedule */
-       qd->t_insert = get_jiffies_64();
-       queue_work(ib_wq, &qd->work);
-bail:
        return;
 }
 
index 0ef9af94997dcd5737922aa37a1483491b0c63f1..4115be54ba3b32626dc75c4529a3aa186b6f02c1 100644 (file)
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
 {
        struct ipoib_ah *ah;
+       struct ib_ah *vah;
 
        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);
 
-       ah->ah = ib_create_ah(pd, attr);
-       if (IS_ERR(ah->ah)) {
+       vah = ib_create_ah(pd, attr);
+       if (IS_ERR(vah)) {
                kfree(ah);
-               ah = NULL;
-       } else
+               ah = (struct ipoib_ah *)vah;
+       } else {
+               ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+       }
 
        return ah;
 }
index 7567b60002309a19a2d0a4cc13da855a46042be4..3514ca05deea34439f8b1ecc992a50c5e66d5d06 100644 (file)
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
        return 0;
 }
 
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       if (ah) {
+       if (!IS_ERR_OR_NULL(ah)) {
                path->pathrec = *pathrec;
 
                old_ah   = path->ah;
@@ -555,15 +555,14 @@ static int path_rec_start(struct net_device *dev,
        return 0;
 }
 
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
-       struct neighbour *n;
        unsigned long flags;
 
-       n = dst_get_neighbour(skb_dst(skb));
        neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
@@ -636,16 +635,14 @@ err_drop:
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
-       struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n;
 
        /* Look up path record for unicasts */
-       n = dst_get_neighbour(dst);
        if (n->ha[4] != 0xff) {
-               neigh_add_path(skb, dev);
+               neigh_add_path(skb, n, dev);
                return;
        }
 
@@ -720,13 +717,19 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct neighbour *n = NULL;
        unsigned long flags;
 
-       if (likely(skb_dst(skb)))
-               n = dst_get_neighbour(skb_dst(skb));
-
+       rcu_read_lock();
+       if (likely(skb_dst(skb))) {
+               n = dst_get_neighbour_noref(skb_dst(skb));
+               if (!n) {
+                       ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       goto unlock;
+               }
+       }
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
-                       ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       ipoib_path_lookup(skb, n, dev);
+                       goto unlock;
                }
 
                neigh = *to_ipoib_neigh(n);
@@ -748,18 +751,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        list_del(&neigh->list);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
-                       ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       ipoib_path_lookup(skb, n, dev);
+                       goto unlock;
                }
 
                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
 
                        unicast_arp_send(skb, dev, phdr);
                }
        }
-
+unlock:
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 }
 
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1218,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format,
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
 
+       priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
index 1b7a9768635673f1a78f74e54e9e33fd37a80eb7..f7ff9dd66cda319bdbde38e0e6ce92cab6fe3216 100644 (file)
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                av.grh.dgid = mcast->mcmember.mgid;
 
                ah = ipoib_create_ah(dev, priv->pd, &av);
-               if (!ah) {
-                       ipoib_warn(priv, "ib_address_create failed\n");
+               if (IS_ERR(ah)) {
+                       ipoib_warn(priv, "ib_address_create failed %ld\n",
+                               -PTR_ERR(ah));
+                       /* use original error */
+                       return PTR_ERR(ah);
                } else {
                        spin_lock_irq(&priv->lock);
                        mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
                skb->dev = dev;
                if (dst)
-                       n = dst_get_neighbour(dst);
+                       n = dst_get_neighbour_noref_raw(dst);
                if (!dst || !n) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,8 +725,10 @@ out:
        if (mcast && mcast->ah) {
                struct dst_entry *dst = skb_dst(skb);
                struct neighbour *n = NULL;
+
+               rcu_read_lock();
                if (dst)
-                       n = dst_get_neighbour(dst);
+                       n = dst_get_neighbour_noref(dst);
                if (n && !*to_ipoib_neigh(n)) {
                        struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
                                                                      skb->dev);
@@ -734,7 +739,7 @@ out:
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }
-
+               rcu_read_unlock();
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
                return;
index 80793f1608eb0d1fd2eaff655cba48ffcc037709..06517e60e50c1e64742083eb1371fb7a328bd929 100644 (file)
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
 {
        struct cma3000_accl_data *data = dev_id;
-       int datax, datay, dataz;
-       u8 ctrl, mode, range, intr_status;
+       int datax, datay, dataz, intr_status;
+       u8 ctrl, mode, range;
 
        intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
        if (intr_status < 0)
index 09b93b11a274278b399e0adbfb1dc8e6917e35e9..e2a9867c19d52fce53cac578bdaf265d3211bba1 100644 (file)
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
  */
 static int elantech_set_properties(struct elantech_data *etd)
 {
+       /* This represents the version of IC body. */
        int ver = (etd->fw_version & 0x0f0000) >> 16;
 
+       /* Early version of Elan touchpads doesn't obey the rule. */
        if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
                etd->hw_version = 1;
-       else if (etd->fw_version < 0x150600)
-               etd->hw_version = 2;
-       else if (ver == 5)
-               etd->hw_version = 3;
-       else if (ver == 6)
-               etd->hw_version = 4;
-       else
-               return -1;
+       else {
+               switch (ver) {
+               case 2:
+               case 4:
+                       etd->hw_version = 2;
+                       break;
+               case 5:
+                       etd->hw_version = 3;
+                       break;
+               case 6:
+                       etd->hw_version = 4;
+                       break;
+               default:
+                       return -1;
+               }
+       }
 
        /*
         * Turn on packet checking by default.
index c5b12d2e955a5cdb8c968c3e31d86bcb99e416b3..86d6f39178b0d556364df951ca33575598bfa416 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
        ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                /* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
        ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
        };
        int val;
 
-       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
                return -EIO;
 
        *btn = buttons[(val & 0x30) >> 4];
index ed1395ac7b8b3e11960ae51c7c92c7f51b615fc9..2e4af24f8c1586b6ecfb3c7f8325cbeb34eb4ae5 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
 /* Finger-sensing Pad control registers */
 #define        FSP_REG_SYSCTL1         0x10
 #define        FSP_BIT_EN_REG_CLK      BIT(5)
+#define        FSP_REG_TMOD_STATUS     0x20
 #define        FSP_REG_OPC_QDOWN       0x31
 #define        FSP_BIT_EN_OPC_TAG      BIT(7)
 #define        FSP_REG_OPTZ_XLO        0x34
index c080b828e5dc5e2f69c8cf859deebe3d6abbae61..a6dcd18e9adf93b5b97cd00e04b6419a6788e1e5 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
        do {
                psmouse_reset(psmouse);
+               if (retry) {
+                       /*
+                        * On some boxes, right after resuming, the touchpad
+                        * needs some time to finish initializing (I assume
+                        * it needs time to calibrate) and start responding
+                        * to Synaptics-specific queries, so let's wait a
+                        * bit.
+                        */
+                       ssleep(1);
+               }
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
 
index 4b2a42f9f0bb471748ee3d6676da48255fd73231..d4d08bd9205b87b7ddc617c55fee84ec6259a2b8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/serio.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include <asm/mach-types.h>
 #include <plat/board-ams-delta.h>
index bb9f5d31f0d0616463609f4b20c940ee0ec2d451..b4cfc6c8be89db327134dd7dd722dfa2623d472a 100644 (file)
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
                },
        },
+       {
+               /* Newer HP Pavilion dv4 models */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+               },
+       },
        { }
 };
 
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
                },
        },
+       {
+               /* Newer HP Pavilion dv4 models */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+               },
+       },
        { }
 };
 
index da0d8761e778cfd8f79e64b26a0ce3acd8cf60c5..2ee47d01a3b4ecde112b07ac95ccd798f053e4f4 100644 (file)
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+       { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
+         0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x47 =
        { "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023,
          31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xE2) },
        { USB_DEVICE_WACOM(0xE3) },
        { USB_DEVICE_WACOM(0xE6) },
+       { USB_DEVICE_WACOM(0xEC) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index c0c7820d4c46b406465e0d2d8e059a80ce819476..bdc447fd4766fbba47f46f49301a3a4758639289 100644 (file)
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
        return 0;
 }
 
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
 {
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
 
        bus_register_notifier(&pci_bus_type, &device_nb);
 
+       intel_iommu_enabled = 1;
+
        return 0;
 }
 
index 07c9f189f3143250e5ea2d0ea20fcdaa2b1674df..6777ca049471728d445ec323e3f051bdc19126f9 100644 (file)
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
        return ir_supported;
 }
 
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
 {
        if (!intr_remapping_enabled)
                return 0;
index 2fb2963df55376a3a8efbf09490457e08b28b836..5b5fa5cdaa3108da74b7358ae187dd4ee8a00181 100644 (file)
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;
 
-       domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
 
index 33ec9e4677727800d5439e16300be820c92090b8..9021182c4b766e02454365f1e2b6d22822358f8a 100644 (file)
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
                case IIOCDOCFINT:
                        if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
                                return (-EINVAL);       /* invalid driver */
+                       if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+                                       sizeof(dioctl.cf_ctrl.msn))
+                               return -EINVAL;
+                       if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+                                       sizeof(dioctl.cf_ctrl.fwd_nr))
+                               return -EINVAL;
                        if ((i = cf_command(dioctl.cf_ctrl.drvid,
                                            (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
                                            dioctl.cf_ctrl.cfproc,
index 04231cb2f031a6148490e916e93308c0ab391bf2..1793ba1b6a89ebdecf777176434dea6e82622573 100644 (file)
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
        isdn_if *iif;
 
-       pr_info("ISDN4Linux interface\n");
-
        iif = kmalloc(sizeof *iif, GFP_KERNEL);
        if (!iif) {
                pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
  */
 void gigaset_isdn_regdrv(void)
 {
+       pr_info("ISDN4Linux interface\n");
        /* nothing to do */
 }
 
index 1f73d7f7e0242e4e73d55582935f4ff0a11f6646..2339d7396b9ea305dd845e592d227bd726d3d43b 100644 (file)
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
                        char *c,
                        *e;
 
+                       if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+                                       sizeof(cfg->drvid))
+                               return -EINVAL;
                        drvidx = -1;
                        chidx = -1;
                        strcpy(drvid, cfg->drvid);
index 661b692573e7790e7d75a4b815cd7741dfe47c74..6d5628bb060115d6e3f032210f9bc862a0ef397b 100644 (file)
@@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev,
        del_timer_sync(&led_cdev->blink_timer);
 
        if (led_cdev->blink_set &&
-           !led_cdev->blink_set(led_cdev, delay_on, delay_off)) {
-               led_cdev->blink_delay_on = *delay_on;
-               led_cdev->blink_delay_off = *delay_off;
+           !led_cdev->blink_set(led_cdev, delay_on, delay_off))
                return;
-       }
 
        /* blink with 1 Hz as default if nothing specified */
        if (!*delay_on && !*delay_off)
index 0dc30ffde5ad7a8defc591c42a251bb9e59c35a9..595d7319701680d9360e9f252d9f0273e7de8726 100644 (file)
@@ -381,6 +381,11 @@ error:
        return PTR_ERR(vqs[i]);
 }
 
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+       return "";
+}
+
 /* The ops structure which hooks everything together. */
 static struct virtio_config_ops lguest_config_ops = {
        .get_features = lg_get_features,
@@ -392,6 +397,7 @@ static struct virtio_config_ops lguest_config_ops = {
        .reset = lg_reset,
        .find_vqs = lg_find_vqs,
        .del_vqs = lg_del_vqs,
+       .bus_name = lg_bus_name,
 };
 
 /*
index 7878712721bf431a1315f44db1c3b2dcc9486245..6d03774b176ec8236d9cfb72654bedf600f052d2 100644 (file)
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
         */
        int i;
 
+       spin_lock_irq(&bitmap->lock);
        for (i = 0; i < bitmap->file_pages; i++)
                set_page_attr(bitmap, bitmap->filemap[i],
                              BITMAP_PAGE_NEEDWRITE);
        bitmap->allclean = 0;
+       spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                         atomic_read(&bitmap->behind_writes),
                         bitmap->mddev->bitmap_info.max_write_behind);
        }
-       if (bitmap->mddev->degraded)
-               /* Never clear bits or update events_cleared when degraded */
-               success = 0;
 
        while (sectors) {
                sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
-               if (success &&
+               if (success && !bitmap->mddev->degraded &&
                    bitmap->events_cleared < bitmap->mddev->events) {
                        bitmap->events_cleared = bitmap->mddev->events;
                        bitmap->need_sync = 1;
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
+               spin_lock_irq(&bitmap->lock);
                bitmap_file_set_bit(bitmap, sec);
+               spin_unlock_irq(&bitmap->lock);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
index c3273efd08cb6dce7a19cd8d193e98ff23600bb7..627456542fb3d0d1f1d18780db24a5531b29ce32 100644 (file)
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
 
        rdev->raid_disk = rdev->saved_raid_disk;
+       rdev->saved_raid_disk = -1;
 
        newconf = linear_conf(mddev,mddev->raid_disks+1);
 
index 84acfe7d10e48e33ea581924d4648948d005d90c..f47f1f8ac44bc16677b212f35b398dae093dab87 100644 (file)
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
-               list_del(&mddev->all_mddevs);
+               list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
                sep = ",";
        }
        if (test_bit(Blocked, &rdev->flags) ||
-           rdev->badblocks.unacked_exist) {
+           (rdev->badblocks.unacked_exist
+            && !test_bit(Faulty, &rdev->flags))) {
                len += sprintf(page+len, "%sblocked", sep);
                sep = ",";
        }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
        else {
+               if (mddev->hold_active == UNTIL_IOCTL)
+                       mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        if (!entry->show)
                return -EIO;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
+
        rv = mddev_lock(mddev);
        if (!rv) {
                rv = entry->show(mddev, page);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
        rv = mddev_lock(mddev);
-       if (mddev->hold_active == UNTIL_IOCTL)
-               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -7342,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
                                        spares++;
                                        md_new_event(mddev);
                                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                               } else
-                                       break;
+                               }
                        }
                }
        }
@@ -7840,6 +7857,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                  s + rdev->data_offset, sectors, acknowledged);
        if (rv) {
                /* Make sure they get written out promptly */
+               sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
index 297e260921787f490b63ddf88a9ea5adbdfd82c4..858fdbb7eb07a24ceb18a4d6214218c9205089e7 100644 (file)
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
@@ -3063,12 +3065,18 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
-               if (test_bit(R5_WriteError, &dev->flags)) {
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
+               if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
@@ -3076,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        } else
                                clear_bit(R5_WriteError, &dev->flags);
                }
-               if (test_bit(R5_MadeGood, &dev->flags)) {
+               if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
                                atomic_inc(&rdev->nr_pending);
index 7eb1bf75cd072ae634b816a8a496ccfd8a559127..5d02221e99dd973ad674e56567bab4b369fae7ac 100644 (file)
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
 
 static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
 {
+       u8 buf[2] = { 0xfb, reg };
        struct i2c_msg msg[] = {
                { .addr = state->i2c_props.addr, .flags = 0,
-                 .buf = &reg, .len = 1 },
+                 .buf = buf, .len = 2 },
                { .addr = state->i2c_props.addr, .flags = I2C_M_RD,
                  .buf = val, .len = 1 },
        };
index aacfe2387e28393d7eaf1f436cbc06afd0d29182..4fc29730a12ccf2305739c34efa625deef0a1f95 100644 (file)
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
        switch (params->u.ofdm.bandwidth) {
        case BANDWIDTH_6_MHZ:
                LP_Fc = 0;
-               LO_Frac = params->frequency + 4000000;
+               LO_Frac = params->frequency + 3000000;
                break;
        case BANDWIDTH_7_MHZ:
                LP_Fc = 1;
index 303f22ea04c075792cd2953628230c82d9e01160..01bb8daf4b09f43c8b153c7a099e85d851e1b249 100644 (file)
@@ -189,7 +189,7 @@ struct ati_remote {
        dma_addr_t inbuf_dma;
        dma_addr_t outbuf_dma;
 
-       unsigned char old_data[2];  /* Detect duplicate events */
+       unsigned char old_data;     /* Detect duplicate events */
        unsigned long old_jiffies;
        unsigned long acc_jiffies;  /* handle acceleration */
        unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
 /* Translation table from hardware messages to input events. */
 static const struct {
        short kind;
-       unsigned char data1, data2;
+       unsigned char data;
        int type;
        unsigned int code;
        int value;
 }  ati_remote_tbl[] = {
        /* Directional control pad axes */
-       {KIND_ACCEL,   0x35, 0x70, EV_REL, REL_X, -1},   /* left */
-       {KIND_ACCEL,   0x36, 0x71, EV_REL, REL_X, 1},    /* right */
-       {KIND_ACCEL,   0x37, 0x72, EV_REL, REL_Y, -1},   /* up */
-       {KIND_ACCEL,   0x38, 0x73, EV_REL, REL_Y, 1},    /* down */
+       {KIND_ACCEL,   0x70, EV_REL, REL_X, -1},   /* left */
+       {KIND_ACCEL,   0x71, EV_REL, REL_X, 1},    /* right */
+       {KIND_ACCEL,   0x72, EV_REL, REL_Y, -1},   /* up */
+       {KIND_ACCEL,   0x73, EV_REL, REL_Y, 1},    /* down */
        /* Directional control pad diagonals */
-       {KIND_LU,      0x39, 0x74, EV_REL, 0, 0},        /* left up */
-       {KIND_RU,      0x3a, 0x75, EV_REL, 0, 0},        /* right up */
-       {KIND_LD,      0x3c, 0x77, EV_REL, 0, 0},        /* left down */
-       {KIND_RD,      0x3b, 0x76, EV_REL, 0, 0},        /* right down */
+       {KIND_LU,      0x74, EV_REL, 0, 0},        /* left up */
+       {KIND_RU,      0x75, EV_REL, 0, 0},        /* right up */
+       {KIND_LD,      0x77, EV_REL, 0, 0},        /* left down */
+       {KIND_RD,      0x76, EV_REL, 0, 0},        /* right down */
 
        /* "Mouse button" buttons */
-       {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
-       {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
-       {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
-       {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+       {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+       {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+       {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+       {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
 
        /* Artificial "doubleclick" events are generated by the hardware.
         * They are mapped to the "side" and "extra" mouse buttons here. */
-       {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
-       {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+       {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+       {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
 
        /* Non-mouse events are handled by rc-core */
-       {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+       {KIND_END, 0x00, EV_MAX + 1, 0, 0}
 };
 
 /* Local function prototypes */
@@ -396,25 +396,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
        return retval;
 }
 
-/*
- *     ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
-       int i;
-
-       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
-               /*
-                * Decide if the table entry matches the remote input.
-                */
-               if (ati_remote_tbl[i].data1 == d1 &&
-                   ati_remote_tbl[i].data2 == d2)
-                       return i;
-
-       }
-       return -1;
-}
-
 /*
  *     ati_remote_compute_accel
  *
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
        int index = -1;
        int acc;
        int remote_num;
-       unsigned char scancode[2];
+       unsigned char scancode;
+       int i;
+
+       /*
+        * data[0] = 0x14
+        * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+        * data[2] = the key code (with toggle bit in MSB with some models)
+        * data[3] = channel << 4 (the low 4 bits must be zero)
+        */
 
        /* Deal with strange looking inputs */
        if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
+       if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+               dbginfo(&ati_remote->interface->dev,
+                       "wrong checksum in input: %02x %02x %02x %02x\n",
+                       data[0], data[1], data[2], data[3]);
+               return;
+       }
+
        /* Mask unwanted remote channels.  */
        /* note: remote_num is 0-based, channel 1 on remote == 0 here */
        remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
-       scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
        /*
-        * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
-        * so we have to clear them. The first bit is a bit tricky as the
-        * "non-toggled" state depends on remote_num, so we xor it with the
-        * second bit which is only used for toggle.
+        * MSB is a toggle code, though only used by some devices
+        * (e.g. SnapStream Firefly)
         */
-       scancode[0] ^= (data[2] & 0x80);
-
-       scancode[1] = data[2] & ~0x80;
+       scancode = data[2] & 0x7f;
 
-       /* Look up event code index in mouse translation table. */
-       index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+       /* Look up event code index in the mouse translation table. */
+       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+               if (scancode == ati_remote_tbl[i].data) {
+                       index = i;
+                       break;
+               }
+       }
 
        if (index >= 0) {
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
-                       remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+                       "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+                       remote_num, data[2], index, ati_remote_tbl[index].code);
                if (!dev)
                        return; /* no mouse device */
        } else
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
-                       remote_num, data[1], data[2], scancode[0], scancode[1]);
+                       "channel 0x%02x; key data %02x, scancode %02x\n",
+                       remote_num, data[2], scancode);
 
 
        if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
                unsigned long now = jiffies;
 
                /* Filter duplicate events which happen "too close" together. */
-               if (ati_remote->old_data[0] == data[1] &&
-                   ati_remote->old_data[1] == data[2] &&
+               if (ati_remote->old_data == data[2] &&
                    time_before(now, ati_remote->old_jiffies +
                                     msecs_to_jiffies(repeat_filter))) {
                        ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
                        ati_remote->first_jiffies = now;
                }
 
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
                ati_remote->old_jiffies = now;
 
                /* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
 
                if (index < 0) {
                        /* Not a mouse event, hand it to rc-core. */
-                       u32 rc_code = (scancode[0] << 8) | scancode[1];
 
                        /*
                         * We don't use the rc-core repeat handling yet as
                         * it would cause ghost repeats which would be a
                         * regression for this driver.
                         */
-                       rc_keydown_notimeout(ati_remote->rdev, rc_code,
+                       rc_keydown_notimeout(ati_remote->rdev, scancode,
                                             data[2]);
                        rc_keyup(ati_remote->rdev);
                        return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
                input_sync(dev);
 
                ati_remote->old_jiffies = jiffies;
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
        }
 }
 
index e1b8b2605c48b6dd1a65a604a65f40c706a5a8ce..81506440eded45d7b9db84462e9b0874eb38ca61 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table ati_x10[] = {
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xc500, KEY_A },
-       { 0xc601, KEY_B },
-       { 0xde19, KEY_C },
-       { 0xe01b, KEY_D },
-       { 0xe621, KEY_E },
-       { 0xe823, KEY_F },
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x00, KEY_A },
+       { 0x01, KEY_B },
+       { 0x19, KEY_C },
+       { 0x1b, KEY_D },
+       { 0x21, KEY_E },
+       { 0x23, KEY_F },
 
-       { 0xdd18, KEY_KPENTER },    /* "check" */
-       { 0xdb16, KEY_MENU },       /* "menu" */
-       { 0xc702, KEY_POWER },      /* Power */
-       { 0xc803, KEY_TV },         /* TV */
-       { 0xc904, KEY_DVD },        /* DVD */
-       { 0xca05, KEY_WWW },        /* WEB */
-       { 0xcb06, KEY_BOOKMARKS },  /* "book" */
-       { 0xcc07, KEY_EDIT },       /* "hand" */
-       { 0xe11c, KEY_COFFEE },     /* "timer" */
-       { 0xe520, KEY_FRONT },      /* "max" */
-       { 0xe21d, KEY_LEFT },       /* left */
-       { 0xe41f, KEY_RIGHT },      /* right */
-       { 0xe722, KEY_DOWN },       /* down */
-       { 0xdf1a, KEY_UP },         /* up */
-       { 0xe31e, KEY_OK },         /* "OK" */
-       { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
-       { 0xcd08, KEY_VOLUMEUP },   /* VOL - */
-       { 0xcf0a, KEY_MUTE },       /* MUTE  */
-       { 0xd00b, KEY_CHANNELUP },  /* CH + */
-       { 0xd10c, KEY_CHANNELDOWN },/* CH - */
-       { 0xec27, KEY_RECORD },     /* ( o) red */
-       { 0xea25, KEY_PLAY },       /* ( >) */
-       { 0xe924, KEY_REWIND },     /* (<<) */
-       { 0xeb26, KEY_FORWARD },    /* (>>) */
-       { 0xed28, KEY_STOP },       /* ([]) */
-       { 0xee29, KEY_PAUSE },      /* ('') */
-       { 0xf02b, KEY_PREVIOUS },   /* (<-) */
-       { 0xef2a, KEY_NEXT },       /* (>+) */
-       { 0xf22d, KEY_INFO },       /* PLAYING */
-       { 0xf32e, KEY_HOME },       /* TOP */
-       { 0xf42f, KEY_END },        /* END */
-       { 0xf530, KEY_SELECT },     /* SELECT */
+       { 0x18, KEY_KPENTER },    /* "check" */
+       { 0x16, KEY_MENU },       /* "menu" */
+       { 0x02, KEY_POWER },      /* Power */
+       { 0x03, KEY_TV },         /* TV */
+       { 0x04, KEY_DVD },        /* DVD */
+       { 0x05, KEY_WWW },        /* WEB */
+       { 0x06, KEY_BOOKMARKS },  /* "book" */
+       { 0x07, KEY_EDIT },       /* "hand" */
+       { 0x1c, KEY_COFFEE },     /* "timer" */
+       { 0x20, KEY_FRONT },      /* "max" */
+       { 0x1d, KEY_LEFT },       /* left */
+       { 0x1f, KEY_RIGHT },      /* right */
+       { 0x22, KEY_DOWN },       /* down */
+       { 0x1a, KEY_UP },         /* up */
+       { 0x1e, KEY_OK },         /* "OK" */
+       { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+       { 0x08, KEY_VOLUMEUP },   /* VOL - */
+       { 0x0a, KEY_MUTE },       /* MUTE  */
+       { 0x0b, KEY_CHANNELUP },  /* CH + */
+       { 0x0c, KEY_CHANNELDOWN },/* CH - */
+       { 0x27, KEY_RECORD },     /* ( o) red */
+       { 0x25, KEY_PLAY },       /* ( >) */
+       { 0x24, KEY_REWIND },     /* (<<) */
+       { 0x26, KEY_FORWARD },    /* (>>) */
+       { 0x28, KEY_STOP },       /* ([]) */
+       { 0x29, KEY_PAUSE },      /* ('') */
+       { 0x2b, KEY_PREVIOUS },   /* (<-) */
+       { 0x2a, KEY_NEXT },       /* (>+) */
+       { 0x2d, KEY_INFO },       /* PLAYING */
+       { 0x2e, KEY_HOME },       /* TOP */
+       { 0x2f, KEY_END },        /* END */
+       { 0x30, KEY_SELECT },     /* SELECT */
 };
 
 static struct rc_map_list ati_x10_map = {
index 09e2cc01d1102801d063e33dbc256e0a5a8d8f70..479cdb8978104b1c2510852d736c7f1fbcfbd6aa 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table medion_x10[] = {
-       { 0xf12c, KEY_TV },    /* TV */
-       { 0xf22d, KEY_VCR },   /* VCR */
-       { 0xc904, KEY_DVD },   /* DVD */
-       { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
-       { 0xf32e, KEY_RADIO },     /* RADIO */
-       { 0xca05, KEY_DIRECTORY }, /* PHOTO */
-       { 0xf42f, KEY_INFO },      /* TV-PREVIEW */
-       { 0xf530, KEY_LIST },      /* CHANNEL-LST */
-
-       { 0xe01b, KEY_SETUP }, /* SETUP */
-       { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
-       { 0xcd08, KEY_VOLUMEDOWN },  /* VOL - */
-       { 0xce09, KEY_VOLUMEUP },    /* VOL + */
-       { 0xd00b, KEY_CHANNELUP },   /* CHAN + */
-       { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
-       { 0xc500, KEY_MUTE },        /* MUTE */
-
-       { 0xf732, KEY_RED }, /* red */
-       { 0xf833, KEY_GREEN }, /* green */
-       { 0xf934, KEY_YELLOW }, /* yellow */
-       { 0xfa35, KEY_BLUE }, /* blue */
-       { 0xdb16, KEY_TEXT }, /* TXT */
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
-       { 0xe520, KEY_DELETE }, /* DELETE */
-
-       { 0xfb36, KEY_KEYBOARD }, /* RENAME */
-       { 0xdd18, KEY_SCREEN },   /* SNAPSHOT */
-
-       { 0xdf1a, KEY_UP },    /* up */
-       { 0xe722, KEY_DOWN },  /* down */
-       { 0xe21d, KEY_LEFT },  /* left */
-       { 0xe41f, KEY_RIGHT }, /* right */
-       { 0xe31e, KEY_OK },    /* OK */
-
-       { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
-       { 0xfd38, KEY_EDIT },   /* EDIT IMAGE */
-
-       { 0xe924, KEY_REWIND },   /* rewind  (<<) */
-       { 0xea25, KEY_PLAY },     /* play    ( >) */
-       { 0xeb26, KEY_FORWARD },  /* forward (>>) */
-       { 0xec27, KEY_RECORD },   /* record  ( o) */
-       { 0xed28, KEY_STOP },     /* stop    ([]) */
-       { 0xee29, KEY_PAUSE },    /* pause   ('') */
-
-       { 0xe621, KEY_PREVIOUS },        /* prev */
-       { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
-       { 0xe823, KEY_NEXT },            /* next */
-       { 0xde19, KEY_MENU },            /* MENU */
-       { 0xff3a, KEY_LANGUAGE },        /* AUDIO */
-
-       { 0xc702, KEY_POWER }, /* POWER */
+       { 0x2c, KEY_TV },    /* TV */
+       { 0x2d, KEY_VCR },   /* VCR */
+       { 0x04, KEY_DVD },   /* DVD */
+       { 0x06, KEY_AUDIO }, /* MUSIC */
+
+       { 0x2e, KEY_RADIO },     /* RADIO */
+       { 0x05, KEY_DIRECTORY }, /* PHOTO */
+       { 0x2f, KEY_INFO },      /* TV-PREVIEW */
+       { 0x30, KEY_LIST },      /* CHANNEL-LST */
+
+       { 0x1b, KEY_SETUP }, /* SETUP */
+       { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+       { 0x08, KEY_VOLUMEDOWN },  /* VOL - */
+       { 0x09, KEY_VOLUMEUP },    /* VOL + */
+       { 0x0b, KEY_CHANNELUP },   /* CHAN + */
+       { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+       { 0x00, KEY_MUTE },        /* MUTE */
+
+       { 0x32, KEY_RED }, /* red */
+       { 0x33, KEY_GREEN }, /* green */
+       { 0x34, KEY_YELLOW }, /* yellow */
+       { 0x35, KEY_BLUE }, /* blue */
+       { 0x16, KEY_TEXT }, /* TXT */
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+       { 0x20, KEY_DELETE }, /* DELETE */
+
+       { 0x36, KEY_KEYBOARD }, /* RENAME */
+       { 0x18, KEY_SCREEN },   /* SNAPSHOT */
+
+       { 0x1a, KEY_UP },    /* up */
+       { 0x22, KEY_DOWN },  /* down */
+       { 0x1d, KEY_LEFT },  /* left */
+       { 0x1f, KEY_RIGHT }, /* right */
+       { 0x1e, KEY_OK },    /* OK */
+
+       { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+       { 0x38, KEY_EDIT },   /* EDIT IMAGE */
+
+       { 0x24, KEY_REWIND },   /* rewind  (<<) */
+       { 0x25, KEY_PLAY },     /* play    ( >) */
+       { 0x26, KEY_FORWARD },  /* forward (>>) */
+       { 0x27, KEY_RECORD },   /* record  ( o) */
+       { 0x28, KEY_STOP },     /* stop    ([]) */
+       { 0x29, KEY_PAUSE },    /* pause   ('') */
+
+       { 0x21, KEY_PREVIOUS },        /* prev */
+       { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+       { 0x23, KEY_NEXT },            /* next */
+       { 0x19, KEY_MENU },            /* MENU */
+       { 0x3a, KEY_LANGUAGE },        /* AUDIO */
+
+       { 0x02, KEY_POWER }, /* POWER */
 };
 
 static struct rc_map_list medion_x10_map = {
index ef146520931c40fe19da588d41be3d6a3f00b990..c7f33ec719b49f0cb6ae9460606d3a54c1542610 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table snapstream_firefly[] = {
-       { 0xf12c, KEY_ZOOM },       /* Maximize */
-       { 0xc702, KEY_CLOSE },
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xdb16, KEY_BACK },
-       { 0xdd18, KEY_KPENTER },    /* ent */
-
-       { 0xce09, KEY_VOLUMEUP },
-       { 0xcd08, KEY_VOLUMEDOWN },
-       { 0xcf0a, KEY_MUTE },
-       { 0xd00b, KEY_CHANNELUP },
-       { 0xd10c, KEY_CHANNELDOWN },
-       { 0xc500, KEY_VENDOR },     /* firefly */
-
-       { 0xf32e, KEY_INFO },
-       { 0xf42f, KEY_OPTION },
-
-       { 0xe21d, KEY_LEFT },
-       { 0xe41f, KEY_RIGHT },
-       { 0xe722, KEY_DOWN },
-       { 0xdf1a, KEY_UP },
-       { 0xe31e, KEY_OK },
-
-       { 0xe11c, KEY_MENU },
-       { 0xe520, KEY_EXIT },
-
-       { 0xec27, KEY_RECORD },
-       { 0xea25, KEY_PLAY },
-       { 0xed28, KEY_STOP },
-       { 0xe924, KEY_REWIND },
-       { 0xeb26, KEY_FORWARD },
-       { 0xee29, KEY_PAUSE },
-       { 0xf02b, KEY_PREVIOUS },
-       { 0xef2a, KEY_NEXT },
-
-       { 0xcb06, KEY_AUDIO },      /* Music */
-       { 0xca05, KEY_IMAGES },     /* Photos */
-       { 0xc904, KEY_DVD },
-       { 0xc803, KEY_TV },
-       { 0xcc07, KEY_VIDEO },
-
-       { 0xc601, KEY_HELP },
-       { 0xf22d, KEY_MODE },       /* Mouse */
-
-       { 0xde19, KEY_A },
-       { 0xe01b, KEY_B },
-       { 0xe621, KEY_C },
-       { 0xe823, KEY_D },
+       { 0x2c, KEY_ZOOM },       /* Maximize */
+       { 0x02, KEY_CLOSE },
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x16, KEY_BACK },
+       { 0x18, KEY_KPENTER },    /* ent */
+
+       { 0x09, KEY_VOLUMEUP },
+       { 0x08, KEY_VOLUMEDOWN },
+       { 0x0a, KEY_MUTE },
+       { 0x0b, KEY_CHANNELUP },
+       { 0x0c, KEY_CHANNELDOWN },
+       { 0x00, KEY_VENDOR },     /* firefly */
+
+       { 0x2e, KEY_INFO },
+       { 0x2f, KEY_OPTION },
+
+       { 0x1d, KEY_LEFT },
+       { 0x1f, KEY_RIGHT },
+       { 0x22, KEY_DOWN },
+       { 0x1a, KEY_UP },
+       { 0x1e, KEY_OK },
+
+       { 0x1c, KEY_MENU },
+       { 0x20, KEY_EXIT },
+
+       { 0x27, KEY_RECORD },
+       { 0x25, KEY_PLAY },
+       { 0x28, KEY_STOP },
+       { 0x24, KEY_REWIND },
+       { 0x26, KEY_FORWARD },
+       { 0x29, KEY_PAUSE },
+       { 0x2b, KEY_PREVIOUS },
+       { 0x2a, KEY_NEXT },
+
+       { 0x06, KEY_AUDIO },      /* Music */
+       { 0x05, KEY_IMAGES },     /* Photos */
+       { 0x04, KEY_DVD },
+       { 0x03, KEY_TV },
+       { 0x07, KEY_VIDEO },
+
+       { 0x01, KEY_HELP },
+       { 0x2d, KEY_MODE },       /* Mouse */
+
+       { 0x19, KEY_A },
+       { 0x1b, KEY_B },
+       { 0x21, KEY_C },
+       { 0x23, KEY_D },
 };
 
 static struct rc_map_list snapstream_firefly_map = {
index 39fc923fc46bc74b512d24243cd7387c5cf0f250..1c6015a04f964df6f03e2be376661fca5d84dba4 100644 (file)
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
        switch (tv.model) {
        case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
        case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
        case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
        case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
                break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
                .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
        { USB_DEVICE(0x2040, 0x8200),
                .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+       { USB_DEVICE(0x2040, 0x7260),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+       { USB_DEVICE(0x2040, 0x7213),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
        { },
 };
 
index 881e04c7ffe6dd91f5a96f12aaebe236a7bfba5c..2ca10dfec91fd2347532d35e22ea44fd4ee01298 100644 (file)
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
        gspca_dev->usb_err = 0;
 
        /* do the specific subdriver stuff before endpoint selection */
-       gspca_dev->alt = 0;
+       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+       gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
        if (gspca_dev->sd_desc->isoc_init) {
                ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
                if (ret < 0)
                        goto unlock;
        }
-       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
        xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
                                   : USB_ENDPOINT_XFER_ISOC;
 
@@ -957,7 +957,7 @@ retry:
                                ret = -EIO;
                                goto out;
                        }
-                       alt = ep_tb[--alt_idx].alt;
+                       gspca_dev->alt = ep_tb[--alt_idx].alt;
                }
        }
 out:
index 89d09a8914f8ea5099d6031d59b5f9117c214b78..82c8817bd32dcddcc74a98593919bd64502de237 100644 (file)
@@ -162,7 +162,6 @@ struct m5mols_version {
  * @pad: media pad
  * @ffmt: current fmt according to resolution type
  * @res_type: current resolution type
- * @code: current code
  * @irq_waitq: waitqueue for the capture
  * @work_irq: workqueue for the IRQ
  * @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
        struct media_pad pad;
        struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
        int res_type;
-       enum v4l2_mbus_pixelcode code;
        wait_queue_head_t irq_waitq;
        struct work_struct work_irq;
        unsigned long flags;
index 05ab3700647e286acf9b01ee994a07cd56eb8014..e0f09e531800d193c1cb0250cb89d820d7fa1236 100644 (file)
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
        int ret = -EINVAL;
        u8 reg;
 
-       if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+       if (mode < REG_PARAMETER || mode > REG_CAPTURE)
                return ret;
 
        ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        struct m5mols_info *info = to_m5mols(sd);
        struct v4l2_mbus_framefmt *format;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        format = __find_format(info, fh, fmt->which, info->res_type);
        if (!format)
                return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        u32 resolution = 0;
        int ret;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        ret = __find_resolution(sd, format, &type, &resolution);
        if (ret < 0)
                return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        if (!sfmt)
                return 0;
 
-       *sfmt           = m5mols_default_ffmt[type];
-       sfmt->width     = format->width;
-       sfmt->height    = format->height;
+
+       format->code = m5mols_default_ffmt[type].code;
+       format->colorspace = V4L2_COLORSPACE_JPEG;
+       format->field = V4L2_FIELD_NONE;
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               *sfmt = *format;
                info->resolution = resolution;
-               info->code = format->code;
                info->res_type = type;
        }
 
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
 static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct m5mols_info *info = to_m5mols(sd);
+       u32 code = info->ffmt[info->res_type].code;
 
        if (enable) {
                int ret = -EINVAL;
 
-               if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+               if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
 
                return ret;
index cf2c0fb95f2f47d96121e55a4e4d87a07c076bcf..398f96ffd35e88d212a8d39c8585decfc1d426d0 100644 (file)
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
        mt9m111->rect.height    = MT9M111_MAX_HEIGHT;
        mt9m111->fmt            = &mt9m111_colour_fmts[0];
        mt9m111->lastpage       = -1;
+       mutex_init(&mt9m111->power_lock);
 
        ret = mt9m111_video_probe(client);
        if (ret) {
index 32114a3c0ca78daa889853893d5fb066f8476cf9..7b34b11daf24d02b5ad0a3eba08c2e437cc60b39 100644 (file)
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
        v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
 
        ret = mt9t112_camera_probe(client);
-       if (ret)
+       if (ret) {
                kfree(priv);
+               return ret;
+       }
 
        /* Cannot fail: using the default supported pixel code */
        mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
index 9c5c19f142de598067de4fec22b229eecb4a7862..ee0d0b39cd170e1700b659a7af2e1085c580b9fa 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/irq.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <media/videobuf-dma-contig.h>
 #include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        vid_dev->num_displays = 0;
        for_each_dss_dev(dssdev) {
                omap_dss_get_device(dssdev);
+
+               if (!dssdev->driver) {
+                       dev_warn(&pdev->dev, "no driver for display: %s\n",
+                                       dssdev->name);
+                       omap_dss_put_device(dssdev);
+                       continue;
+               }
+
                vid_dev->displays[vid_dev->num_displays++] = dssdev;
        }
 
index e87ae2f634b26e523c406427f22b4b2e362cf5e2..6a6cf388bae446d70a043eac83cb09e2b6a0e4e8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
index 1d54b86c936bba11252877c0db65bc5a99e70b47..3ea38a8def8e1523121c8142acc769d06436539a 100644 (file)
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
        unsigned long flags;
        struct sgdma_state *sg_state;
 
-       if ((sglen < 0) || ((sglen > 0) & !sglist))
+       if ((sglen < 0) || ((sglen > 0) && !sglist))
                return -EINVAL;
 
        spin_lock_irqsave(&sgdma->lock, flags);
index b0b0fa5a3572fb834a87c45dba668b798524dfad..54a4a3f22e2e4187c5651aaa42cf73dcf9141059 100644 (file)
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
 {
        struct isp_pipeline *pipe =
                to_isp_pipeline(&ccdc->video_out.video.entity);
-       struct video_device *vdev = &ccdc->subdev.devnode;
+       struct video_device *vdev = ccdc->subdev.devnode;
        struct v4l2_event event;
 
        memset(&event, 0, sizeof(event));
index 68d539456c552aa0c5ee16bd999ed09f41dee712..bc0b2c7349b97894d62c6fcdb876eff8613634f7 100644 (file)
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-       struct video_device *vdev = &stat->subdev.devnode;
+       struct video_device *vdev = stat->subdev.devnode;
        struct v4l2_event event;
        struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
index d1000723c5ae0040f45d2f283a0df45de4ee8a01..f2290578448c416bb809abe4d11d4146da6ead91 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
index 9f2d26b1d4cb8e2af6a48ccdc9c3f66c4bc87715..6806345ec2f0f6a9a26accb862b8eb4aa6d5c565 100644 (file)
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
 static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct soc_camera_sense *sense = icd->sense;
        struct ov6650 *priv = to_ov6650(client);
        bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
index c8d91b0cd9bdd043e54e2ed47391a5736bda61ad..2cc3b9166724f2eaf47ea1c05f5a12e5e7208ea7 100644 (file)
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
                        vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
        set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+       fimc_hw_reset(fimc);
+       cap->buf_index = 0;
+
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
        struct fimc_dev *fimc = ctx->fimc_dev;
        int ret;
 
-       if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+       if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
                return 0;
 
        spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
                fimc_hw_set_rotation(ctx);
                fimc_prepare_dma_offset(ctx, &ctx->d_frame);
                fimc_hw_set_out_dma(ctx);
-               set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+               clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
        }
        spin_unlock(&ctx->slock);
        return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
        int min_bufs;
        int ret;
 
-       fimc_hw_reset(fimc);
        vid_cap->frame_count = 0;
 
        ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
        max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
        min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
        min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
-       if (fimc->id == 1 && var->pix_hoff)
+       if (var->min_vsize_align == 1 && !rotation)
                align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
 
        depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
 
        mutex_lock(&fimc->lock);
        set_frame_bounds(ff, mf->width, mf->height);
+       fimc->vid_cap.mf = *mf;
        ff->fmt = ffmt;
 
        /* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
        media_entity_cleanup(&sd->entity);
        v4l2_device_unregister_subdev(sd);
        kfree(sd);
-       sd = NULL;
+       fimc->vid_cap.subdev = NULL;
 }
 
 /* Set default format at the sensor and host interface */
index 19ca6db38b2f87f7951fe423cfe6741d3733acac..07c6254faee32b1ef3ee75964faa2b63b1f7615d 100644 (file)
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
 static struct fimc_fmt fimc_formats[] = {
        {
                .name           = "RGB565",
-               .fourcc         = V4L2_PIX_FMT_RGB565X,
+               .fourcc         = V4L2_PIX_FMT_RGB565,
                .depth          = { 16 },
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
                mod_x = 6; /* 64 x 32 pixels tile */
                mod_y = 5;
        } else {
-               if (fimc->id == 1 && variant->pix_hoff)
+               if (variant->min_vsize_align == 1)
                        mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
                else
-                       mod_y = mod_x;
+                       mod_y = ffs(variant->min_vsize_align) - 1;
        }
-       dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
 
        v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
                &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
                fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
 
        /* Get pixel alignment constraints. */
-       if (fimc->id == 1 && fimc->variant->pix_hoff)
+       if (fimc->variant->min_vsize_align == 1)
                halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
        else
-               halign = ffs(min_size) - 1;
+               halign = ffs(fimc->variant->min_vsize_align) - 1;
 
        for (i = 0; i < f->fmt->colplanes; i++)
                depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
        pdata = pdev->dev.platform_data;
        fimc->pdata = pdata;
 
-       set_bit(ST_LPM, &fimc->state);
 
        init_waitqueue_head(&fimc->irq_queue);
        spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
        /* Enable clocks and perform basic initalization */
        clk_enable(fimc->clock[CLK_GATE]);
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
 
        /* Resume the capture or mem-to-mem device */
        if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
                return 0;
        }
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
        struct fimc_dev *fimc = platform_get_drvdata(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       fimc_runtime_suspend(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
        vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[0],
 };
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit = &s5p_pix_limit[1],
 };
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 1,
+       .min_vsize_align = 1,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[3],
 };
index a6936dad5b1025b196ef6c1acd5e40131a44d7c3..c7f01c47b20fe354e101bbf4ab904cab18e3ec55 100644 (file)
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
  * @min_inp_pixsize: minimum input pixel size
  * @min_out_pixsize: minimum output pixel size
  * @hor_offs_align: horizontal pixel offset aligment
+ * @min_vsize_align: minimum vertical pixel size alignment
  * @out_buf_count: the number of buffers in output DMA sequence
  */
 struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
        u16             min_inp_pixsize;
        u16             min_out_pixsize;
        u16             hor_offs_align;
+       u16             min_vsize_align;
        u16             out_buf_count;
 };
 
index cc337b1de91392ffbcd00eebe33095c037b1b97c..615c862f0360ef20061e28701d1a99f460d0a7e1 100644 (file)
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
        sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
                                       s_info->pdata->board_info, NULL);
        if (IS_ERR_OR_NULL(sd)) {
+               i2c_put_adapter(adapter);
                v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
                return NULL;
        }
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
 static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct i2c_adapter *adapter;
 
        if (!client)
                return;
        v4l2_device_unregister_subdev(sd);
+       adapter = client->adapter;
        i2c_unregister_device(client);
-       i2c_put_adapter(client->adapter);
+       if (adapter)
+               i2c_put_adapter(adapter);
 }
 
 static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
 
 static int fimc_md_register_video_nodes(struct fimc_md *fmd)
 {
+       struct video_device *vdev;
        int i, ret = 0;
 
        for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
                if (!fmd->fimc[i])
                        continue;
 
-               if (fmd->fimc[i]->m2m.vfd)
-                       ret = video_register_device(fmd->fimc[i]->m2m.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
-               if (ret)
-                       break;
-               if (fmd->fimc[i]->vid_cap.vfd)
-                       ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
+               vdev = fmd->fimc[i]->m2m.vfd;
+               if (vdev) {
+                       ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+                       if (ret)
+                               break;
+                       v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                                 vdev->name, video_device_node_name(vdev));
+               }
+
+               vdev = fmd->fimc[i]->vid_cap.vfd;
+               if (vdev == NULL)
+                       continue;
+               ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+               v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                         vdev->name, video_device_node_name(vdev));
        }
 
        return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                        if (WARN(csis == NULL,
                                 "MIPI-CSI interface specified "
                                 "but s5p-csis module is not loaded!\n"))
-                               continue;
+                               return -EINVAL;
 
                        ret = media_entity_create_link(&sensor->entity, 0,
                                              &csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        struct fimc_md *fmd;
        int ret;
 
-       if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
-               return -EINVAL;
-
        fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
        if (!fmd)
                return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        if (ret)
                goto err3;
 
-       ret = fimc_md_register_sensor_entities(fmd);
-       if (ret)
-               goto err3;
+       if (pdev->dev.platform_data) {
+               ret = fimc_md_register_sensor_entities(fmd);
+               if (ret)
+                       goto err3;
+       }
        ret = fimc_md_create_links(fmd);
        if (ret)
                goto err3;
index 20e664e341632df52877c78ef10ce96f6bc57301..44f5c2d1920bb50dd0dfe8e436eec1df32aa8c3f 100644 (file)
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
        cfg = readl(dev->regs + S5P_CIGCTRL);
        cfg &= ~S5P_CIGCTRL_SWRST;
        writel(cfg, dev->regs + S5P_CIGCTRL);
+
+       if (dev->variant->out_buf_count > 4)
+               fimc_hw_set_dma_seq(dev, 0xF);
 }
 
 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
        struct fimc_scaler *sc = &ctx->scaler;
        struct fimc_frame *src_frame = &ctx->s_frame;
        struct fimc_frame *dst_frame = &ctx->d_frame;
-       u32 cfg = 0;
+
+       u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+       cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+                S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+                S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+                S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+                S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
 
        if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
                cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
        fimc_hw_set_scaler(ctx);
 
        cfg = readl(dev->regs + S5P_CISCCTRL);
+       cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 
        if (variant->has_mainscaler_ext) {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
                cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CIEXTEN);
        } else {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
index 1e8cdb77d4b8540a991bb55d98ef35225596c98f..dff9dc79879566356df6ed580d28119793ae3b09 100644 (file)
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
                .num_planes = 1,
        },
        {
-               .name = "H264 Encoded Stream",
+               .name = "H263 Encoded Stream",
                .fourcc = V4L2_PIX_FMT_H263,
                .codec_mode = S5P_FIMV_CODEC_H263_ENC,
                .type = MFC_FMT_ENC,
index e16d3a4bc1dcc0a0c4522e252b61a53d0e8e78d6..b47d0c06ecf5ab3ee66c911f1d035df1948b31e7 100644 (file)
@@ -16,6 +16,7 @@
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/version.h>
 #include <linux/timer.h>
 #include <media/videobuf2-dma-contig.h>
index f390682629cf8e6ee832275e47e2ddc0ef1af433..c51decfcae197cedb83f6d68169cc0a6251f621d 100644 (file)
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
        ret = sh_mobile_ceu_soft_reset(pcdev);
 
        csi2_sd = find_csi2(pcdev);
-       if (csi2_sd)
-               csi2_sd->grp_id = (long)icd;
+       if (csi2_sd) {
+               csi2_sd->grp_id = soc_camera_grp_id(icd);
+               v4l2_set_subdev_hostdata(csi2_sd, icd);
+       }
 
        ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
 {
        if (pcdev->csi2_pdev) {
                struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-               if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+               if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
                        return csi2_sd;
        }
 
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
                        /* Try 2560x1920, 1280x960, 640x480, 320x240 */
                        mf.width        = 2560 >> shift;
                        mf.height       = 1920 >> shift;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        s_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                        if (ret < 0)
                                return ret;
                        shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
        bool ceu_1to1;
        int ret;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                        soc_camera_grp_id(icd), video,
                                         s_mbus_fmt, mf);
        if (ret < 0)
                return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
                tmp_h = min(2 * tmp_h, max_height);
                mf->width = tmp_w;
                mf->height = tmp_h;
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                s_mbus_fmt, mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, mf);
                dev_geo(dev, "Camera scaled to %ux%u\n",
                        mf->width, mf->height);
                if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
        }
 
        if (interm_width < icd->user_width || interm_height < icd->user_height) {
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
-                                                s_mbus_fmt, &mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                if (ret < 0)
                        return ret;
 
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
        mf.code         = xlate->code;
        mf.colorspace   = pix->colorspace;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+       ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+                                        video, try_mbus_fmt, &mf);
        if (ret < 0)
                return ret;
 
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
                         */
                        mf.width = 2560;
                        mf.height = 1920;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        try_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       try_mbus_fmt, &mf);
                        if (ret < 0) {
                                /* Shouldn't actually happen... */
                                dev_err(icd->parent,
index ea4f0473ed3be5c35c076c78e8f370e224175821..8a652b53ff7e9a8a1a4ad0f00bf7a874bf8f3bf6 100644 (file)
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
                                 const struct v4l2_mbus_config *cfg)
 {
        struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
                                              .flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
 static int sh_csi2_client_connect(struct sh_csi2 *priv)
 {
        struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-       struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct device *dev = v4l2_get_subdevdata(&priv->subdev);
        struct v4l2_mbus_config cfg;
index b72580c38957855434e12c265567864ddf49e583..62e4312515cb99a5e640cfe440954f1b8f5994cd 100644 (file)
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
        }
 
        sd = soc_camera_to_subdev(icd);
-       sd->grp_id = (long)icd;
+       sd->grp_id = soc_camera_grp_id(icd);
+       v4l2_set_subdev_hostdata(sd, icd);
 
        if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
                goto ectrl;
index 43c0ebb81956188b22f7ce2953eddc936f39ae3a..b7b2d3483fd4e1e1f8bb5b6a2feb6e7c349c57f3 100644 (file)
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e9173804ede2bacb93a7179d5d7c817c79dfcf5..d3d572b2317b888be174bf52d3d17c65b391ef07 100644 (file)
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d88483112cac13565cce2a8f88d3092c9d516c..8d816cce8322ebecdf8d40e29f0c0b0a3aedcbdb 100644 (file)
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37fd59901b882fc0b995e6182fe43a4eedb..1924b857a0fbf6355d9acfcd96fa387b841c6976 100644 (file)
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee533eacb8d204e860f9881d58e2675adf126..ef39528088f2298a47e7f62f69317b2a6af428ee 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d96c24075a3cbca9a320fabe3177b4de1a8..a5ddf31b60ca89d3f228997f7da526b126449d94 100644 (file)
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf2f652b8edf6accf9db107d8f63ea72c23..c1da84bc1573f563c4b698f0bc786068e7f911bc 100644 (file)
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
index bfbd66021afd383703fad96e890975853c2bf063..61e70cfaa774fb977adcba8cc697a06edd673210 100644 (file)
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8cc6c38f3e40337444b91111646afefde88..29f11e0765feef54093b839b6e288ae56bc0bf3f 100644 (file)
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
index 5d6ba132837e8efb5f470d48784d5bb5f9809509..61894fced8ea281570bf80320fd593efa0d25740 100644 (file)
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index d593878d66d054e76f75860aec7176b9c012a5cf..5664696f2d3a8512c6ef96aca461dde22e5d486f 100644 (file)
@@ -472,7 +472,7 @@ config BMP085
          module will be called bmp085.
 
 config PCH_PHUB
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"
+       tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
        depends on PCI
        help
          This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
          processor. The Topcliff has MAC address and Option ROM data in SROM.
          This driver can access MAC address and Option ROM data in SROM.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver also can be used for LAPIS Semiconductor's IOH,
+         ML7213/ML7223/ML7831.
+         ML7213 which is for IVI(In-Vehicle Infotainment) use.
+         ML7223 IOH is for MP(Media Phone) use.
+         ML7831 IOH is for general purpose use.
+         ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
          To compile this driver as a module, choose M here: the module will
          be called pch_phub.
index a662f5987b6892591008291e3b5b05506918ed9e..82b2cb77ae197b2124ede9131dc4cbe0902ce0e8 100644 (file)
@@ -100,7 +100,7 @@ enum dpot_devid {
        AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
        AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
                        BRDAC0, 7, 28),
-       AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+       AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
                        BRDAC0, 8, 29),
        AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
                        BRDAC0 | BRDAC1, 8, 30),
index 7ce6065dc20e806a06845f08bcec084adbb4418d..eb5cd28bc6d8d7a1917f7f15d82b267b692315f8 100644 (file)
@@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op)
 /* CTL-CPLD Version Register */
 #define CTL_CPLD_VERSION       0x2000
 
-static int fpga_of_probe(struct platform_device *op,
-                        const struct of_device_id *match)
+static int fpga_of_probe(struct platform_device *op)
 {
        struct device_node *of_node = op->dev.of_node;
        struct device *this_device;
@@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = {
        {},
 };
 
-static struct of_platform_driver fpga_of_driver = {
+static struct platform_driver fpga_of_driver = {
        .probe          = fpga_of_probe,
        .remove         = fpga_of_remove,
        .driver         = {
@@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = {
 static int __init fpga_init(void)
 {
        led_trigger_register_simple("fpga", &ledtrig_fpga);
-       return of_register_platform_driver(&fpga_of_driver);
+       return platform_driver_register(&fpga_of_driver);
 }
 
 static void __exit fpga_exit(void)
 {
-       of_unregister_platform_driver(&fpga_of_driver);
+       platform_driver_unregister(&fpga_of_driver);
        led_trigger_unregister_simple(ledtrig_fpga);
 }
 
index 3965821fef174c308ac73e37072d552287985d91..14e974b2a7812452d14ad2d92979e8d84483da7e 100644 (file)
@@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data)
        return true;
 }
 
-static int data_of_probe(struct platform_device *op,
-                        const struct of_device_id *match)
+static int data_of_probe(struct platform_device *op)
 {
        struct device_node *of_node = op->dev.of_node;
        struct device *this_device;
@@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = {
        {},
 };
 
-static struct of_platform_driver data_of_driver = {
+static struct platform_driver data_of_driver = {
        .probe          = data_of_probe,
        .remove         = data_of_remove,
        .driver         = {
@@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = {
 
 static int __init data_init(void)
 {
-       return of_register_platform_driver(&data_of_driver);
+       return platform_driver_register(&data_of_driver);
 }
 
 static void __exit data_exit(void)
 {
-       of_unregister_platform_driver(&data_of_driver);
+       platform_driver_unregister(&data_of_driver);
 }
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
index 26cf12ca7f503e5d2babb111ab6eaf132cf5c5a2..701edf6589705cb42f290a04c233239f7248760a 100644 (file)
@@ -85,7 +85,7 @@ config EEPROM_93XX46
 
 config EEPROM_DIGSY_MTC_CFG
        bool "DigsyMTC display configuration EEPROMs device"
-       depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO
+       depends on GPIO_MPC5200 && SPI_GPIO
        help
          This option enables access to display configuration EEPROMs
          on digsy_mtc board. You have to additionally select Microwire
index 7b33de95c4bf5ff762e8f7bd9d56d671248f509b..0ff4b02177be7b37c64dfe6b5513212ef7afee19 100644 (file)
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
        eeprom->reg_data_out = 0;
        eeprom->reg_data_clock = 0;
        eeprom->reg_chip_select = 1;
+       eeprom->drive_data = 1;
        eeprom->register_write(eeprom);
 
        /*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
         */
        eeprom->reg_data_in = 0;
        eeprom->reg_data_out = 0;
+       eeprom->drive_data = 1;
 
        /*
         * Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
         */
        eeprom->reg_data_in = 0;
        eeprom->reg_data_out = 0;
+       eeprom->drive_data = 0;
 
        /*
         * Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
 }
 EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
 
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+       u16 command;
+
+       /* start the command */
+       eeprom_93cx6_startup(eeprom);
+
+       /* create command to enable/disable */
+
+       command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+       command <<= (eeprom->width - 2);
+
+       eeprom_93cx6_write_bits(eeprom, command,
+                               PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+       eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write the @data to the specified @addr in the EEPROM and
+ * waiting for the device to finish writing.
+ *
+ * Note, since we do not expect large number of write operations
+ * we delay in between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+       int timeout = 100;
+       u16 command;
+
+       /* start the command */
+       eeprom_93cx6_startup(eeprom);
+
+       command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+       command |= addr;
+
+       /* send write command */
+       eeprom_93cx6_write_bits(eeprom, command,
+                               PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+       /* send data */
+       eeprom_93cx6_write_bits(eeprom, data, 16);
+
+       /* get ready to check for busy */
+       eeprom->drive_data = 0;
+       eeprom->reg_chip_select = 1;
+       eeprom->register_write(eeprom);
+
+       /* wait at-least 250ns to get DO to be the busy signal */
+       usleep_range(1000, 2000);
+
+       /* wait for DO to go high to signify finish */
+
+       while (true) {
+               eeprom->register_read(eeprom);
+
+               if (eeprom->reg_data_out)
+                       break;
+
+               usleep_range(1000, 2000);
+
+               if (--timeout <= 0) {
+                       printk(KERN_ERR "%s: timeout\n", __func__);
+                       break;
+               }
+       }
+
+       eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
index dee33addcaebf82c2652e089830fff7d442a8ed2..10fc4785dba7dd65330a7008db87a4423a0bedd5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
                                              (Intel EG20T PCH)*/
 #define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
-                                               offset(OKI SEMICONDUCTOR ML7213)
+                                               offset(LAPIS Semicon ML7213)
                                              */
 #define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
-                                               offset(OKI SEMICONDUCTOR ML7223)
+                                               offset(LAPIS Semicon ML7223)
                                              */
 
 /* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
 #define PCI_DEVICE_ID_ROHM_ML7223_mPHUB        0x8012 /* for Bus-m */
 #define PCI_DEVICE_ID_ROHM_ML7223_nPHUB        0x8002 /* for Bus-n */
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
+
 /* SROM ACCESS Macro */
 #define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
 
  * @pch_mac_start_address:             MAC address area start address
  * @pch_opt_rom_start_address:         Option ROM start address
  * @ioh_type:                          Save IOH type
+ * @pdev:                              pointer to pci device struct
  */
 struct pch_phub_reg {
        u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
        u32 pch_mac_start_address;
        u32 pch_opt_rom_start_address;
        int ioh_type;
+       struct pci_dev *pdev;
 };
 
 /* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
        int retval;
        int i;
 
-       if (chip->ioh_type == 1) /* EG20T */
+       if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
                retval = pch_phub_gbe_serial_rom_conf(chip);
        else    /* ML7223 */
                retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
        unsigned int orom_size;
        int ret;
        int err;
+       ssize_t rom_size;
 
        struct pch_phub_reg *chip =
                dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
        }
 
        /* Get Rom signature */
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               goto exrom_map_err;
+
        pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
                                (unsigned char *)&rom_signature);
        rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
                goto return_err;
        }
 return_ok:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
        mutex_unlock(&pch_phub_mutex);
        return addr_offset;
 
 return_err:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
        mutex_unlock(&pch_phub_mutex);
 return_err_nomutex:
        return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
        int err;
        unsigned int addr_offset;
        int ret;
+       ssize_t rom_size;
        struct pch_phub_reg *chip =
                dev_get_drvdata(container_of(kobj, struct device, kobj));
 
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
                goto return_ok;
        }
 
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address) {
+               err = -ENOMEM;
+               goto exrom_map_err;
+       }
+
        for (addr_offset = 0; addr_offset < count; addr_offset++) {
                if (PCH_PHUB_OROM_SIZE < off + addr_offset)
                        goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
        }
 
 return_ok:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
        mutex_unlock(&pch_phub_mutex);
        return addr_offset;
 
 return_err:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
        mutex_unlock(&pch_phub_mutex);
        return err;
 }
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
 {
        u8 mac[8];
        struct pch_phub_reg *chip = dev_get_drvdata(dev);
+       ssize_t rom_size;
+
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               return -ENOMEM;
 
        pch_phub_read_gbe_mac_addr(chip, mac);
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
 
        return sprintf(buf, "%pM\n", mac);
 }
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
        u8 mac[6];
+       ssize_t rom_size;
        struct pch_phub_reg *chip = dev_get_drvdata(dev);
 
        if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
                (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
                (u32 *)&mac[4], (u32 *)&mac[5]);
 
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               return -ENOMEM;
+
        pch_phub_write_gbe_mac_addr(chip, mac);
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
 
        return count;
 }
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
        int retval;
 
        int ret;
-       ssize_t rom_size;
        struct pch_phub_reg *chip;
 
        chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
                "in pch_phub_base_address variable is %p\n", __func__,
                chip->pch_phub_base_address);
 
-       if (id->driver_data != 3) {
-               chip->pch_phub_extrom_base_address =\
-                                                  pci_map_rom(pdev, &rom_size);
-               if (chip->pch_phub_extrom_base_address == 0) {
-                       dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
-                       ret = -ENOMEM;
-                       goto err_pci_map;
-               }
-               dev_dbg(&pdev->dev, "%s : "
-                       "pci_map_rom SUCCESS and value in "
-                       "pch_phub_extrom_base_address variable is %p\n",
-                       __func__, chip->pch_phub_extrom_base_address);
-       }
+       chip->pdev = pdev; /* Save pci device struct */
 
        if (id->driver_data == 1) { /* EG20T PCH */
                const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
                chip->pch_opt_rom_start_address =\
                                                 PCH_PHUB_ROM_START_ADDR_ML7223;
                chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+       } else if (id->driver_data == 5) { /* ML7831 */
+               retval = sysfs_create_file(&pdev->dev.kobj,
+                                          &dev_attr_pch_mac.attr);
+               if (retval)
+                       goto err_sysfs_create;
+
+               retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+               if (retval)
+                       goto exit_bin_attr;
+
+               /* set the prefech value */
+               iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+               /* set the interrupt delay value */
+               iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+               chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+               chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
        }
 
        chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
        sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
 
 err_sysfs_create:
-       pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
-err_pci_map:
        pci_iounmap(pdev, chip->pch_phub_base_address);
 err_pci_iomap:
        pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
 
        sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
        sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
-       pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
        pci_iounmap(pdev, chip->pch_phub_base_address);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2,  },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3,  },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4,  },
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5,  },
        { }
 };
 MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
 module_init(pch_phub_pci_init);
 module_exit(pch_phub_pci_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
 MODULE_LICENSE("GPL");
index 42f067347bc70fd8f24466ed4384408191812611..3fac67a5204cfa768717a00b6628079fd37f939d 100644 (file)
@@ -576,7 +576,7 @@ xpnet_init(void)
         * report an error if the data is not retrievable and the
         * packet will be dropped.
         */
-       xpnet_device->features = NETIF_F_NO_CSUM;
+       xpnet_device->features = NETIF_F_HW_CSUM;
 
        result = register_netdev(xpnet_device);
        if (result != 0) {
index cfbddbef11de3b67c54be85797dd124843ca8596..43d073bc1d9c5fef61a1cb3a1e741a6cd6b6e0f9 100644 (file)
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
 }
 module_exit(spear_pcie_gadget_exit);
 
-MODULE_ALIAS("pcie-gadget-spear");
+MODULE_ALIAS("platform:pcie-gadget-spear");
 MODULE_AUTHOR("Pratyush Anand");
 MODULE_LICENSE("GPL");
index a1cb21f95302c497157dc97ed85f9acd68a3c10d..1e0e27cbe98786e82abf3d5d9e6ede36cc8f0eb2 100644 (file)
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
+
+       /*
+        * Some Micron MMC cards needs longer data read timeout than
+        * indicated in CSD.
+        */
+       MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+                 MMC_QUIRK_LONG_READ_TIME),
+
        END_FIXUP
 };
 
index 5278ffb20e74bbba9b5508e008171165ad296f16..950b97d7412a4f6f5f562401f132ed0d568a969a 100644 (file)
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
                        data->timeout_clks = 0;
                }
        }
+
+       /*
+        * Some cards require longer data read timeout than indicated in CSD.
+        * Address this by setting the read timeout to a "reasonably high"
+        * value. For the cards tested, 300ms has proven enough. If necessary,
+        * this value can be increased if other problematic cards require this.
+        */
+       if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+               data->timeout_ns = 300000000;
+               data->timeout_clks = 0;
+       }
+
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
        mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+       struct mmc_card *card;
+       unsigned int timeout;
+       unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+       int err = 0;
+
+       card = host->card;
+
+       /*
+        * Send power notify command only if card
+        * is mmc and notify state is powered ON
+        */
+       if (card && mmc_card_mmc(card) &&
+           (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+                       notify_type = EXT_CSD_POWER_OFF_SHORT;
+                       timeout = card->ext_csd.generic_cmd6_time;
+                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+               } else {
+                       notify_type = EXT_CSD_POWER_OFF_LONG;
+                       timeout = card->ext_csd.power_off_longtime;
+                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
+               }
+
+               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                EXT_CSD_POWER_OFF_NOTIFICATION,
+                                notify_type, timeout);
+
+               if (err && err != -EBADMSG)
+                       pr_err("Device failed to respond within %d poweroff "
+                              "time. Forcefully powering down the device\n",
+                              timeout);
+
+               /* Set the card state to no notification after the poweroff */
+               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+       }
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
-       struct mmc_card *card;
-       unsigned int notify_type;
-       unsigned int timeout;
-       int err;
-
        mmc_host_clk_hold(host);
 
-       card = host->card;
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
-       if (card && mmc_card_mmc(card) &&
-           (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-                       notify_type = EXT_CSD_POWER_OFF_SHORT;
-                       timeout = card->ext_csd.generic_cmd6_time;
-                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-               } else {
-                       notify_type = EXT_CSD_POWER_OFF_LONG;
-                       timeout = card->ext_csd.power_off_longtime;
-                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
-               }
-
-               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                                EXT_CSD_POWER_OFF_NOTIFICATION,
-                                notify_type, timeout);
-
-               if (err && err != -EBADMSG)
-                       pr_err("Device failed to respond within %d poweroff "
-                              "time. Forcefully powering down the device\n",
-                              timeout);
-
-               /* Set the card state to no notification after the poweroff */
-               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-       }
+       mmc_poweroff_notify(host);
 
        /*
         * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
 
        mmc_bus_get(host);
 
-       if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+       if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);
 
        mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
                 * pre-claim the host.
                 */
                if (mmc_try_claim_host(host)) {
-                       if (host->bus_ops->suspend)
+                       if (host->bus_ops->suspend) {
+                               /*
+                                * For eMMC 4.5 device send notify command
+                                * before sleep, because in sleep state eMMC 4.5
+                                * devices respond to only RESET and AWAKE cmd
+                                */
+                               mmc_poweroff_notify(host);
                                err = host->bus_ops->suspend(host);
+                       }
+                       mmc_do_release_host(host);
+
                        if (err == -ENOSYS || !host->bus_ops->resume) {
                                /*
                                 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
                                host->pm_flags = 0;
                                err = 0;
                        }
-                       mmc_do_release_host(host);
                } else {
                        err = -EBUSY;
                }
index e8a5eb38748be34a002aa24a5ba28af9c472c182..d31c78b72b0fd8d1e53adb2f403454790896fafe 100644 (file)
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        host->max_blk_size = 512;
        host->max_blk_count = PAGE_CACHE_SIZE / 512;
 
-       /*
-        * Enable runtime power management by default. This flag was added due
-        * to runtime power management causing disruption for some users, but
-        * the power on/off code has been improved since then.
-        *
-        * We'll enable this flag by default as an experiment, and if no
-        * problems are reported, we will follow up later and remove the flag
-        * altogether.
-        */
-       host->caps = MMC_CAP_POWER_OFF_CARD;
-
        return host;
 
 free:
index dbf421a6279c702d91baeede5bb5f1e55dcb6fcd..d240427c12462dd545b233d6d6e75787e4b48abb 100644 (file)
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
         * set the notification byte in the ext_csd register of device
         */
        if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-           (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+           (card->ext_csd.rev >= 6)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_POWER_OFF_NOTIFICATION,
                                 EXT_CSD_POWER_ON,
                                 card->ext_csd.generic_cmd6_time);
                if (err && err != -EBADMSG)
                        goto free_card;
-       }
 
-       if (!err)
-               card->poweroff_notify_state = MMC_POWERED_ON;
+               /*
+                * The err can be -EBADMSG or 0,
+                * so check for success and update the flag
+                */
+               if (!err)
+                       card->poweroff_notify_state = MMC_POWERED_ON;
+       }
 
        /*
         * Activate high speed (if supported)
index 50b5f9926f6462d7836434c89a71213f418faa9c..0726e59fd418fb4b8520de940d76214db15c2ccc 100644 (file)
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
 {
        /* First check for errors */
-       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                     MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;
 
                /* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        }
 
        if (!cmd->data || cmd->error) {
-               if (host->data)
+               if (host->data) {
+                       /* Terminate the DMA transfer */
+                       if (dma_inprogress(host))
+                               mmci_dma_data_error(host);
                        mmci_stop_data(host);
+               }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
                data = host->data;
-               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
-                             MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                             MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+                             MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);
 
                cmd = host->cmd;
index 325ea61e12d37d4d47cb9f7ee82d795f11fc8ccf..8e0fbe99404778ee1cc4f512b767479d7dad9855 100644 (file)
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
+                       host->dma = NULL;
                }
        }
 
index 101cd31c82207982cf5986390a80f91c1053c535..d5fe43d53c51894ff9e861dbced02bfde10ef705 100644 (file)
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
                        host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
+               host->data->host_cookie = 0;
        }
        host->data = NULL;
 }
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        struct mmc_data *data = mrq->data;
 
        if (host->use_dma) {
-               dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                            omap_hsmmc_get_dma_dir(host, data));
+               if (data->host_cookie)
+                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                    data->sg_len,
+                                    omap_hsmmc_get_dma_dir(host, data));
                data->host_cookie = 0;
        }
 }
index 4b920b7621cfe7cd214d7c3ed30d848de383e1ad..b4257e700617c6e4f51455a462df1f063dfa0294 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
        .driver         = {
                .name   = "sdhci-cns3xxx",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_cns3xxx_probe,
        .remove         = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_cns3xxx_init(void)
index f2d29dca442074e319da8fc050ea28777ddf32fc..a81312c91f7086625d10f963df9f31c0b64e0c4c 100644 (file)
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
        .driver         = {
                .name   = "sdhci-dove",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_dove_probe,
        .remove         = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_dove_init(void)
index 4b976f00ea85137913a2b54e7dce63e590b809ac..38ebc4ea259fcb659f855fe446e45e0dc7fa9952 100644 (file)
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
                .name   = "sdhci-esdhc-imx",
                .owner  = THIS_MODULE,
                .of_match_table = imx_esdhc_dt_ids,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .id_table       = imx_esdhc_devtype,
        .probe          = sdhci_esdhc_imx_probe,
        .remove         = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_imx_init(void)
index 59e9d003e5891c0f9d76b8788055399bb1c3c070..01e5f627e0f047b2a7c63474a931fbedab0a1444 100644 (file)
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
                .name = "sdhci-esdhc",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_esdhc_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_esdhc_probe,
        .remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_init(void)
index 9b0d794a4f692911d05347fe917722635b96594b..3619adc7d9fc36548283386f82e08be2d195a608 100644 (file)
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
                .name = "sdhci-hlwd",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_hlwd_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_hlwd_probe,
        .remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_hlwd_init(void)
index d833d9c2f7e338425262402af53d6e2f2b63e848..6878a94626bc35b18bf493a4dcbf522dc3403255 100644 (file)
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
        int                     (*probe_slot) (struct sdhci_pci_slot *);
        void                    (*remove_slot) (struct sdhci_pci_slot *, int);
 
-       int                     (*suspend) (struct sdhci_pci_chip *,
-                                       pm_message_t);
+       int                     (*suspend) (struct sdhci_pci_chip *);
        int                     (*resume) (struct sdhci_pci_chip *);
 };
 
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
                jmicron_enable_mmc(slot->host, 0);
 }
 
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
 {
        int i;
 
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                if (!slot)
                        continue;
 
-               ret = sdhci_suspend_host(slot->host, state);
+               ret = sdhci_suspend_host(slot->host);
 
                if (ret) {
                        for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                }
                pci_set_power_state(pdev, PCI_D3hot);
        } else {
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               pci_set_power_state(pdev, PCI_D3hot);
        }
 
        return 0;
 }
 
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
-       pm_message_t state = { .event = PM_EVENT_SUSPEND };
        int i, ret;
 
        chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
 #endif
 
 static const struct dev_pm_ops sdhci_pci_pm_ops = {
+       .suspend = sdhci_pci_suspend,
+       .resume = sdhci_pci_resume,
        .runtime_suspend = sdhci_pci_runtime_suspend,
        .runtime_resume = sdhci_pci_runtime_resume,
        .runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
        .id_table =     pci_ids,
        .probe =        sdhci_pci_probe,
        .remove =       __devexit_p(sdhci_pci_remove),
-       .suspend =      sdhci_pci_suspend,
-       .resume =       sdhci_pci_resume,
        .driver =       {
                .pm =   &sdhci_pci_pm_ops
        },
index a9e12ea05583bfe3da78ef48f5442c3294367ff5..03970bcb3495ee7f542a12fc118074635dcd78ff 100644 (file)
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
 
 #ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, state);
+       return sdhci_suspend_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
 
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+       .suspend        = sdhci_pltfm_suspend,
+       .resume         = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
 #endif /* CONFIG_PM */
 
 static int __init sdhci_pltfm_drv_init(void)
index 3a9fc3f40840eb5a3a09704c2282863b32375405..37e0e184a0bbf1f23fe9ca6233f85341633802b6 100644 (file)
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
 extern int sdhci_pltfm_unregister(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
 #endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
index d4bf6d30c7baa3ea5a6c6bb1abc01d58e41ad853..7a039c3cb1f10a8651ae04091c8303008ad2cbe2 100644 (file)
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
        .driver         = {
                .name   = "sdhci-pxav2",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav2_probe,
        .remove         = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav2_init(void)
 {
index cff4ad3e7a59c94c599716bfcf3bf63b0f28d3d7..15673a7ee6a59a70d7455179f3afe30b3fb15588 100644 (file)
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
        .driver         = {
                .name   = "sdhci-pxav3",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav3_probe,
        .remove         = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav3_init(void)
 {
index 3d00e722efc9cec34e9cbdd1807863326f58e828..0d33ff0d67fbf7abf46f496c14e61dbdd3a9f6a4 100644 (file)
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, pm);
+       return sdhci_suspend_host(host);
 }
 
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+       .suspend        = sdhci_s3c_suspend,
+       .resume         = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
 #endif
 
 static struct platform_driver sdhci_s3c_driver = {
        .probe          = sdhci_s3c_probe,
        .remove         = __devexit_p(sdhci_s3c_remove),
-       .suspend        = sdhci_s3c_suspend,
-       .resume         = sdhci_s3c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "s3c-sdhci",
+               .pm     = SDHCI_S3C_PMOPS,
        },
 };
 
index 89699e861fc1c90c8c127ac798a3e208dd2d839e..e2e18d3f949c45dda79395646484a848a84d9326 100644 (file)
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
                .name   = "sdhci-tegra",
                .owner  = THIS_MODULE,
                .of_match_table = sdhci_tegra_dt_match,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_tegra_probe,
        .remove         = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_tegra_init(void)
index 6d8eea3235411e875250b3a8b7f588c2290304ec..19ed580f2cabf2fca09ae59d52cb86f5265f9ab8 100644 (file)
@@ -2327,7 +2327,7 @@ out:
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
        int ret;
 
index 0a5b65460d8a760de4c37cbdb2d9a89dd7cf430d..a04d4d0c6fd20911c29f59a4f0132eea2cffa6b7 100644 (file)
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
 extern int sdhci_resume_host(struct sdhci_host *host);
 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
 #endif
index 369366c8e205e4ed8aa27746e3a8940b89e54287..d5505f3fe2a170d04fc9011c5b5d2ff886a31df7 100644 (file)
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
-                       if (p->down_pwr)
+                       if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
index d85a60cda16781ae25509a28705122597b87dd2a..4208b3958069119e68e8da8ad03e3eadf5862b8c 100644 (file)
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* start bus clock */
                tmio_mmc_clk_start(host);
        } else if (ios->power_mode != MMC_POWER_UP) {
-               if (host->set_pwr)
+               if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
                        host->set_pwr(host->pdev, 0);
                if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
                    pdata->power) {
index e8f6e65183d77d1a6eeb8d8137af21d57f07ca90..2ec978bc32ba47232b6618db96e2db1bebb25038 100644 (file)
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
                 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
index 94f553489725280d2ec851023f17745e86c48d9a..45876d0e5b8e500b321466fad5789d7a9bfbe182 100644 (file)
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
        if (!err)
                dev_info(&pdev->dev, "registered mtd device\n");
 
-       /* add the whole device. */
-       err = mtd_device_register(info->mtd, NULL, 0);
-       if (err)
-               dev_err(&pdev->dev, "failed to register the entire device\n");
+       if (pdata->nr_partitions) {
+               /* add the whole device. */
+               err = mtd_device_register(info->mtd, NULL, 0);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to register the entire device\n");
+               }
+       }
 
        return err;
 
index 411a17df9fc13ee2fd78d8e54456e18095bdd91f..2a25b6789af4d5633ee7fcadf104fc08e9183928 100644 (file)
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
        }
        info->mtd->owner = THIS_MODULE;
 
-       mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+       mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
 
        platform_set_drvdata(pdev, info);
        return 0;
index 071b63420f0e213a2ba9df41495bce2ba468b2af..493ec2fcf97fe7f8c21e55420e4ecb3ddaa6c845 100644 (file)
@@ -21,9 +21,9 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
-
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
index ee1713907b92b0684a64de524ca564214699f705..f8aacf48ecddf6c12a63a10026e8b948d5f0a8e2 100644 (file)
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
        if (!flash_np)
                return -ENODEV;
 
-       ppdata->of_node = flash_np;
+       ppdata.of_node = flash_np;
        ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
                        dev_name(&ndfc->ofdev->dev), flash_np->name);
        if (!ndfc->mtd.name) {
index 583f66cd5bbd63dc199468b87b96baf32582bcb1..9845afb37cc80a785cc3fe137ed6d0e729ccb589 100644 (file)
@@ -125,6 +125,8 @@ config IFB
          'ifb1' etc.
          Look at the iproute2 documentation directory for usage etc
 
+source "drivers/net/team/Kconfig"
+
 config MACVLAN
        tristate "MAC-VLAN support (EXPERIMENTAL)"
        depends on EXPERIMENTAL
@@ -241,10 +243,14 @@ source "drivers/atm/Kconfig"
 
 source "drivers/net/caif/Kconfig"
 
+source "drivers/net/dsa/Kconfig"
+
 source "drivers/net/ethernet/Kconfig"
 
 source "drivers/net/fddi/Kconfig"
 
+source "drivers/net/hippi/Kconfig"
+
 config NET_SB1000
        tristate "General Instruments Surfboard 1000"
        depends on PNP
index fa877cd2b1393cfd09d107355dc5c4f4526008dc..1988881853ab8437b7776493df00e063588e5f59 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
 obj-$(CONFIG_PHYLIB) += phy/
 obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
 obj-$(CONFIG_HIPPI) += hippi/
index a73d9dc80ff674d7b90de88226511470b9c8c873..84fb6349a59ab7d78c2ba37fe4d06538e5d52bd8 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig ARCNET
        depends on NETDEVICES && (ISA || PCI || PCMCIA)
-       bool "ARCnet support"
+       tristate "ARCnet support"
        ---help---
          If you have a network card of this type, say Y and check out the
          (arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644 (file)
index 027a0ee..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
-       struct inet6_dev *idev;
-
-       if (!dev)
-               return;
-
-       idev = in6_dev_get(dev);
-       if (!idev)
-               return;
-
-       read_lock_bh(&idev->lock);
-       if (!list_empty(&idev->addr_list)) {
-               struct inet6_ifaddr *ifa
-                       = list_first_entry(&idev->addr_list,
-                                          struct inet6_ifaddr, if_list);
-               ipv6_addr_copy(addr, &ifa->addr);
-       } else
-               ipv6_addr_set(addr, 0, 0, 0, 0);
-
-       read_unlock_bh(&idev->lock);
-
-       in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
-                        struct in6_addr *daddr,
-                        int router,
-                        unsigned short vlan_id)
-{
-       struct in6_addr mcaddr;
-       struct icmp6hdr icmp6h = {
-               .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
-       };
-       struct sk_buff *skb;
-
-       icmp6h.icmp6_router = router;
-       icmp6h.icmp6_solicited = 0;
-       icmp6h.icmp6_override = 1;
-
-       addrconf_addr_solict_mult(daddr, &mcaddr);
-
-       pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
-                slave_dev->name, &mcaddr, daddr);
-
-       skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
-                             ND_OPT_TARGET_LL_ADDR);
-
-       if (!skb) {
-               pr_err("NA packet allocation failed\n");
-               return;
-       }
-
-       if (vlan_id) {
-               /* The Ethernet header is not present yet, so it is
-                * too early to insert a VLAN tag.  Force use of an
-                * out-of-line tag here and let dev_hard_start_xmit()
-                * insert it if the slave hardware can't.
-                */
-               skb = __vlan_hwaccel_put_tag(skb, vlan_id);
-               if (!skb) {
-                       pr_err("failed to insert VLAN tag\n");
-                       return;
-               }
-       }
-
-       ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master.  This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
-       struct slave *slave = bond->curr_active_slave;
-       struct vlan_entry *vlan;
-       struct inet6_dev *idev;
-       int is_router;
-
-       pr_debug("%s: bond %s slave %s\n", bond->dev->name,
-                __func__, slave ? slave->dev->name : "NULL");
-
-       if (!slave || !bond->send_unsol_na ||
-           test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
-               return;
-
-       bond->send_unsol_na--;
-
-       idev = in6_dev_get(bond->dev);
-       if (!idev)
-               return;
-
-       is_router = !!idev->cnf.forwarding;
-
-       in6_dev_put(idev);
-
-       if (!ipv6_addr_any(&bond->master_ipv6))
-               bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
-                       bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
-                                    vlan->vlan_id);
-               }
-       }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
-                               unsigned long event,
-                               void *ptr)
-{
-       struct inet6_ifaddr *ifa = ptr;
-       struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
-       struct bonding *bond;
-       struct vlan_entry *vlan;
-       struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
-       list_for_each_entry(bond, &bn->dev_list, bond_list) {
-               if (bond->dev == event_dev) {
-                       switch (event) {
-                       case NETDEV_UP:
-                               if (ipv6_addr_any(&bond->master_ipv6))
-                                       ipv6_addr_copy(&bond->master_ipv6,
-                                                      &ifa->addr);
-                               return NOTIFY_OK;
-                       case NETDEV_DOWN:
-                               if (ipv6_addr_equal(&bond->master_ipv6,
-                                                   &ifa->addr))
-                                       bond_glean_dev_ipv6(bond->dev,
-                                                           &bond->master_ipv6);
-                               return NOTIFY_OK;
-                       default:
-                               return NOTIFY_DONE;
-                       }
-               }
-
-               list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-                       rcu_read_lock();
-                       vlan_dev = __vlan_find_dev_deep(bond->dev,
-                                                       vlan->vlan_id);
-                       rcu_read_unlock();
-                       if (vlan_dev == event_dev) {
-                               switch (event) {
-                               case NETDEV_UP:
-                                       if (ipv6_addr_any(&vlan->vlan_ipv6))
-                                               ipv6_addr_copy(&vlan->vlan_ipv6,
-                                                              &ifa->addr);
-                                       return NOTIFY_OK;
-                               case NETDEV_DOWN:
-                                       if (ipv6_addr_equal(&vlan->vlan_ipv6,
-                                                           &ifa->addr))
-                                               bond_glean_dev_ipv6(vlan_dev,
-                                                                   &vlan->vlan_ipv6);
-                                       return NOTIFY_OK;
-                               default:
-                                       return NOTIFY_DONE;
-                               }
-                       }
-               }
-       }
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
-       .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
-       register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
-       unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
index b0c577256487b4b7c35d690da3a2ca619d2deb4c..435984ad8b2f1a8bedd54ba49eb4269f3991bd3a 100644 (file)
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being added
  */
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *stop_at;
        int i, res;
 
        bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_add_vid) {
-                       slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
-               }
+               res = vlan_vid_add(slave->dev, vid);
+               if (res)
+                       goto unwind;
        }
 
        res = bond_add_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to add vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
+
+unwind:
+       /* unwind from head to the slave that failed */
+       stop_at = slave;
+       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+               vlan_vid_del(slave->dev, vid);
+
+       return res;
 }
 
 /**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being removed
  */
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
        int i, res;
 
-       bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_kill_vid) {
-                       slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
-               }
-       }
+       bond_for_each_slave(bond, slave, i)
+               vlan_vid_del(slave->dev, vid);
 
        res = bond_del_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to remove vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
 }
 
 static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
 {
        struct vlan_entry *vlan;
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_add_vid))
-               return;
+       int res;
 
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
-               slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+               res = vlan_vid_add(slave_dev, vlan->vlan_id);
+               if (res)
+                       pr_warning("%s: Failed to add vlan id %d to device %s\n",
+                                  bond->dev->name, vlan->vlan_id,
+                                  slave_dev->name);
+       }
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
                                      struct net_device *slave_dev)
 {
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct vlan_entry *vlan;
 
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_kill_vid))
-               return;
-
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
-               slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+               vlan_vid_del(slave_dev, vlan->vlan_id);
        }
 }
 
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
        return 0;
 }
 
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct slave *slave;
        struct bonding *bond = netdev_priv(dev);
-       u32 mask;
+       netdev_features_t mask;
        int i;
 
        read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
 {
        struct slave *slave;
        struct net_device *bond_dev = bond->dev;
-       u32 vlan_features = BOND_VLAN_FEATURES;
+       netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        int i;
 
@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                 "but new slave device does not support netpoll.\n",
                                 bond_dev->name);
                        res = -EBUSY;
-                       goto err_close;
+                       goto err_detach;
                }
        }
 #endif
@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        res = bond_create_slave_symlinks(bond_dev, slave_dev);
        if (res)
-               goto err_close;
+               goto err_detach;
 
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 err_dest_symlinks:
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
+err_detach:
+       write_lock_bh(&bond->lock);
+       bond_detach_slave(bond, new_slave);
+       write_unlock_bh(&bond->lock);
+
 err_close:
        dev_close(slave_dev);
 
@@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
        struct sockaddr addr;
-       u32 old_features = bond_dev->features;
+       netdev_features_t old_features = bond_dev->features;
 
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2553,30 +2558,6 @@ re_arm:
        }
 }
 
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
-       struct in_device *idev;
-       struct in_ifaddr *ifa;
-       __be32 addr = 0;
-
-       if (!dev)
-               return 0;
-
-       rcu_read_lock();
-       idev = __in_dev_get_rcu(dev);
-       if (!idev)
-               goto out;
-
-       ifa = idev->ifa_list;
-       if (!ifa)
-               goto out;
-
-       addr = ifa->ifa_local;
-out:
-       rcu_read_unlock();
-       return addr;
-}
-
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
        struct vlan_entry *vlan;
@@ -3322,6 +3303,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct bonding *bond;
        struct vlan_entry *vlan;
 
+       /* we only care about primary address */
+       if(ifa->ifa_flags & IFA_F_SECONDARY)
+               return NOTIFY_DONE;
+
        list_for_each_entry(bond, &bn->dev_list, bond_list) {
                if (bond->dev == event_dev) {
                        switch (event) {
@@ -3329,7 +3314,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                bond->master_ip = ifa->ifa_local;
                                return NOTIFY_OK;
                        case NETDEV_DOWN:
-                               bond->master_ip = bond_glean_dev_ip(bond->dev);
+                               bond->master_ip = 0;
                                return NOTIFY_OK;
                        default:
                                return NOTIFY_DONE;
@@ -3345,8 +3330,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                        vlan->vlan_ip = ifa->ifa_local;
                                        return NOTIFY_OK;
                                case NETDEV_DOWN:
-                                       vlan->vlan_ip =
-                                               bond_glean_dev_ip(vlan_dev);
+                                       vlan->vlan_ip = 0;
                                        return NOTIFY_OK;
                                default:
                                        return NOTIFY_DONE;
@@ -4360,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_RX |
                                NETIF_F_HW_VLAN_FILTER;
 
-       bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+       bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        bond_dev->features |= bond_dev->hw_features;
 }
 
index 5a20804fdece8d1c281e310e49509973185a33ed..4ef7e2fd9fe6f5b577c13fc56a5b2ae45a54a0bd 100644 (file)
@@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d,
                goto out;
        }
 
+       if (bond->slave_cnt > 0) {
+               pr_err("unable to update mode of %s because it has slaves.\n",
+                       bond->dev->name);
+               ret = -EPERM;
+               goto out;
+       }
+
        new_value = bond_parse_parm(buf, bond_mode_tbl);
        if (new_value < 0)  {
                pr_err("%s: Ignoring invalid mode value %.*s.\n",
index 073352517adc20f043ac4024b791d2b45bce0766..0a4fc62a381dfb80652220e896f973e1618a5901 100644 (file)
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);
 
-
-       ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
-       if (ret) {
-               dev_warn(&cfhsi->ndev->dev,
-                       "%s: can't wake up HSI interface: %d.\n",
-                       __func__, ret);
-               return ret;
-       }
-
        do {
                ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                &fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
                }
        } while (1);
 
-       cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
        return ret;
 }
 
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
-               BUG_ON(!len);
+               WARN_ON(!len);
 
                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
index 23406e62c0b031fc7298b32ec00ad37e9971faf0..8a3054b848125f76153d9bb53ccbaf19c1c36b3c 100644 (file)
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
 /*This list is protected by the rtnl lock. */
 static LIST_HEAD(ser_list);
 
-static int ser_loop;
+static bool ser_loop;
 module_param(ser_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
 
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
 module_param(ser_use_stx, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
 
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
 
 module_param(ser_use_fcs, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
-                       BUG_ON(tmp != skb);
+                       WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
 
        ser = tty->disc_data;
        BUG_ON(ser == NULL);
-       BUG_ON(ser->tty != tty);
+       WARN_ON(ser->tty != tty);
        handle_tx(ser);
 }
 
index d4b26fb24ed9ddb85c3d3cfdb42be74cbb75c17e..5b2041319a32b55907a555cae2d32eb7921d0db1 100644 (file)
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
                if ((avail_emptybuff > HIGH_WATERMARK) &&
                                        (!pshm_drv->tx_empty_available)) {
                        pshm_drv->tx_empty_available = 1;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                                                CAIF_FLOW_ON);
 
-                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                        /* Schedule the work queue. if required */
                        if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        list_entry(pshm_drv->rx_full_list.next, struct buf_list,
                                        list);
                list_del_init(&pbuf->list);
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                /* Retrieve pointer to start of the packet descriptor area. */
                pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        /* Get a suitable CAIF packet and copy in data. */
                        skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
                                                        frm_pck_len + 1);
-                       BUG_ON(skb == NULL);
+
+                       if (skb == NULL) {
+                               pr_info("OOM: Try next frame in descriptor\n");
+                               break;
+                       }
 
                        p = skb_put(skb, frm_pck_len);
                        memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        pck_desc++;
                }
 
+               spin_lock_irqsave(&pshm_drv->lock, flags);
                list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
 
                spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
 
                if (skb == NULL)
                        goto send_msg;
-
                /* Check the available no. of buffers in the empty list */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                                        pshm_drv->tx_empty_available) {
                        /* Update blocking condition. */
                        pshm_drv->tx_empty_available = 0;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                        CAIF_FLOW_OFF);
+                       spin_lock_irqsave(&pshm_drv->lock, flags);
                }
                /*
                 * We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        }
 
                        skb = skb_dequeue(&pshm_drv->sk_qhead);
+                       if (skb == NULL)
+                               break;
                        /* Copy in CAIF frame. */
                        skb_copy_bits(skb, 0, pbuf->desc_vptr +
                                        pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
                                                                        frmlen;
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
 
                        /* Fill in the shared memory packet descriptor area. */
                        pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
 static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
 {
        struct shmdrv_layer *pshm_drv;
-       unsigned long flags = 0;
 
        pshm_drv = netdev_priv(shm_netdev);
 
-       spin_lock_irqsave(&pshm_drv->lock, flags);
-
        skb_queue_tail(&pshm_drv->sk_qhead, skb);
 
-       spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
        /* Schedule Tx work queue. for deferred processing of skbs*/
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
                                                (NR_TX_BUF * TX_BUF_SZ);
 
+       spin_lock_init(&pshm_drv->lock);
        INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
 
                if (pshm_dev->shm_loopback)
-                       tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+                       tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
                else
                        tx_buf->desc_vptr =
                                        ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                rx_buf->len = RX_BUF_SZ;
 
                if (pshm_dev->shm_loopback)
-                       rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+                       rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
                else
                        rx_buf->desc_vptr =
                                        ioremap(rx_buf->phy_addr, RX_BUF_SZ);
index 05e791f46aef7db07b5df2393db24d7a56108ab5..96391c36fa74c77a66b47add6cf188f069fcce0a 100644 (file)
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
 /* Returns the number of padding bytes for alignment. */
 #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
 
-static int spi_loop;
+static bool spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
-                          cfspi->xfer.va_tx,
+                          cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
        netif_stop_queue(dev);
        return 0;
 }
-static const struct net_device_ops cfspi_ops = {
-       .ndo_open = cfspi_open,
-       .ndo_stop = cfspi_close,
-       .ndo_start_xmit = cfspi_xmit
-};
 
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
 {
+       int res = 0;
        struct cfspi *cfspi = netdev_priv(dev);
-       dev->features = 0;
-       dev->netdev_ops = &cfspi_ops;
-       dev->type = ARPHRD_CAIF;
-       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-       dev->tx_queue_len = 0;
-       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-       dev->destructor = free_netdev;
-       skb_queue_head_init(&cfspi->qhead);
-       skb_queue_head_init(&cfspi->chead);
-       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
-       cfspi->cfdev.use_frag = false;
-       cfspi->cfdev.use_stx = false;
-       cfspi->cfdev.use_fcs = false;
-       cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
-       struct cfspi *cfspi = NULL;
-       struct net_device *ndev;
-       struct cfspi_dev *dev;
-       int res;
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
-       ndev = alloc_netdev(sizeof(struct cfspi),
-                       "cfspi%d", cfspi_setup);
-       if (!ndev)
-               return -ENOMEM;
-
-       cfspi = netdev_priv(ndev);
-       netif_stop_queue(ndev);
-       cfspi->ndev = ndev;
-       cfspi->pdev = pdev;
 
        /* Set flow info. */
        cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
                cfspi->slave_talked = false;
        }
 
-       /* Assign the SPI device. */
-       cfspi->dev = dev;
-       /* Assign the device ifc to this SPI interface. */
-       dev->ifc = &cfspi->ifc;
-
        /* Allocate DMA buffers. */
-       cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
-       if (!cfspi->xfer.va_tx) {
+       cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+       if (!cfspi->xfer.va_tx[0]) {
                res = -ENODEV;
-               goto err_dma_alloc_tx;
+               goto err_dma_alloc_tx_0;
        }
 
        cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
        /* Schedule the work queue. */
        queue_work(cfspi->wq, &cfspi->work);
 
+       return 0;
+
+ err_create_wq:
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+       return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+
+       /* Remove from list. */
+       spin_lock(&cfspi_list_lock);
+       list_del(&cfspi->list);
+       spin_unlock(&cfspi_list_lock);
+
+       cfspi->ndev = NULL;
+       /* Free DMA buffers. */
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+       set_bit(SPI_TERMINATE, &cfspi->state);
+       wake_up_interruptible(&cfspi->wait);
+       destroy_workqueue(cfspi->wq);
+       /* Destroy debugfs directory and files. */
+       dev_debugfs_rem(cfspi);
+       return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+       .ndo_open = cfspi_open,
+       .ndo_stop = cfspi_close,
+       .ndo_init = cfspi_init,
+       .ndo_uninit = cfspi_uninit,
+       .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+       dev->features = 0;
+       dev->netdev_ops = &cfspi_ops;
+       dev->type = ARPHRD_CAIF;
+       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+       dev->tx_queue_len = 0;
+       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+       dev->destructor = free_netdev;
+       skb_queue_head_init(&cfspi->qhead);
+       skb_queue_head_init(&cfspi->chead);
+       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+       cfspi->cfdev.use_frag = false;
+       cfspi->cfdev.use_stx = false;
+       cfspi->cfdev.use_fcs = false;
+       cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+       struct cfspi *cfspi = NULL;
+       struct net_device *ndev;
+       struct cfspi_dev *dev;
+       int res;
+       dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+       ndev = alloc_netdev(sizeof(struct cfspi),
+                       "cfspi%d", cfspi_setup);
+       if (!dev)
+               return -ENODEV;
+
+       cfspi = netdev_priv(ndev);
+       netif_stop_queue(ndev);
+       cfspi->ndev = ndev;
+       cfspi->pdev = pdev;
+
+       /* Assign the SPI device. */
+       cfspi->dev = dev;
+       /* Assign the device ifc to this SPI interface. */
+       dev->ifc = &cfspi->ifc;
+
        /* Register network device. */
        res = register_netdev(ndev);
        if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
        return res;
 
  err_net_reg:
-       dev_debugfs_rem(cfspi);
-       set_bit(SPI_TERMINATE, &cfspi->state);
-       wake_up_interruptible(&cfspi->wait);
-       destroy_workqueue(cfspi->wq);
- err_create_wq:
-       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
-       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
        free_netdev(ndev);
 
        return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
 
 int cfspi_spi_remove(struct platform_device *pdev)
 {
-       struct list_head *list_node;
-       struct list_head *n;
-       struct cfspi *cfspi = NULL;
-       struct cfspi_dev *dev;
-
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-       spin_lock(&cfspi_list_lock);
-       list_for_each_safe(list_node, n, &cfspi_list) {
-               cfspi = list_entry(list_node, struct cfspi, list);
-               /* Find the corresponding device. */
-               if (cfspi->dev == dev) {
-                       /* Remove from list. */
-                       list_del(list_node);
-                       /* Free DMA buffers. */
-                       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-                       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
-                       set_bit(SPI_TERMINATE, &cfspi->state);
-                       wake_up_interruptible(&cfspi->wait);
-                       destroy_workqueue(cfspi->wq);
-                       /* Destroy debugfs directory and files. */
-                       dev_debugfs_rem(cfspi);
-                       unregister_netdev(cfspi->ndev);
-                       spin_unlock(&cfspi_list_lock);
-                       return 0;
-               }
-       }
-       spin_unlock(&cfspi_list_lock);
-       return -ENODEV;
+       /* Everything is done in cfspi_uninit(). */
+       return 0;
 }
 
 static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
 
        list_for_each_safe(list_node, n, &cfspi_list) {
                cfspi = list_entry(list_node, struct cfspi, list);
-               platform_device_unregister(cfspi->pdev);
+               unregister_netdev(cfspi->ndev);
        }
 
        /* Destroy sysfs files. */
index f6c98fb4a517c3940906360ad20e9b5b1bd7e790..ab45758c49a4938fbcee5e4f7304a7c76b59915e 100644 (file)
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/c_can/Kconfig"
 
+source "drivers/net/can/cc770/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
index 24ebfe8d758adf3568ceeab95c11a60116515c16..938be37b670cc7d9888b2287e5057e3b9cd0d3eb 100644 (file)
@@ -14,6 +14,7 @@ obj-y                         += softing/
 obj-$(CONFIG_CAN_SJA1000)      += sja1000/
 obj-$(CONFIG_CAN_MSCAN)                += mscan/
 obj-$(CONFIG_CAN_C_CAN)                += c_can/
+obj-$(CONFIG_CAN_CC770)                += cc770/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
index 044ea0647b045206bc0f7f0ed6123c273232c665..6ea905c2cf6d1f030dd0893f0a1097e1d5aef03f 100644 (file)
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
        .id_table = at91_can_id_table,
 };
 
-static int __init at91_can_module_init(void)
-{
-       return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
-       platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
index a1c5abc38cd26b9591083ef467a4b73ea67883ea..349e0fabb63abb8b589d49dceab652a73a52aca5 100644 (file)
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
        },
 };
 
-static int __init bfin_can_init(void)
-{
-       return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
-       platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_LICENSE("GPL");
index 0b5c6f8bdd347e2285a1fd68957114bed0fdc1a0..5e1a5ff6476ef0919e029640cb8890b78c2b55ab 100644 (file)
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
        .remove = __devexit_p(c_can_plat_remove),
 };
 
-static int __init c_can_plat_init(void)
-{
-       return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
-       platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
 
 MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644 (file)
index 0000000..22c07a8
--- /dev/null
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+       tristate "Bosch CC770 and Intel AN82527 devices"
+       depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+       tristate "ISA Bus based legacy CC770 driver"
+       ---help---
+         This driver adds legacy support for CC770 and AN82527 chips
+         connected to the ISA bus using I/O port, memory mapped or
+         indirect access.
+
+config CAN_CC770_PLATFORM
+       tristate "Generic Platform Bus based CC770 driver"
+       ---help---
+         This driver adds support for the CC770 and AN82527 chips
+         connected to the "platform bus" (Linux abstraction for directly
+         to the processor attached devices).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644 (file)
index 0000000..9fb8321
--- /dev/null
@@ -0,0 +1,9 @@
+#
+#  Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644 (file)
index 0000000..7668967
--- /dev/null
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses the additional functionality 3. on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames and
+ * therefore it's set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load. Therefore it makes sense to use
+ * this message object for the needed use case. The frame type (EFF/SFF)
+ * for the message object 15 can be defined via kernel module parameter
+ * "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames,
+ * otherwise 11 bit SFF messages.
+ */
/* If non-zero, message object 15 receives 29-bit EFF frames instead of
 * 11-bit SFF frames; see the comment block above and cc770_init(). */
static int msgobj15_eff;
module_param(msgobj15_eff, int, S_IRUGO);
MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
		 "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
+                "without using additional functions");
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allows to configure and assign them to the real
+ * message object.
+ */
/* Default role of each driver-managed message object (indexed by the
 * CC770_OBJ_* logical numbers, mapped to hardware objects via obj2msgobj()).
 * RX0/RX1 split SFF/EFF data frames, RX_RTR0/RX_RTR1 catch remote requests,
 * and one object is reserved for TX. cc770_init() may move the EFF flag to
 * RX0 when the msgobj15_eff module parameter is set. */
static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
	[CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
	[CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
	[CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
	[CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
			      CC770_OBJ_FLAG_EFF,
	[CC770_OBJ_TX] = 0,
};
+
/* Bit-timing limits advertised to the CAN core; these match the field
 * widths of the two bit-timing registers written in cc770_set_bittiming()
 * (6-bit BRP, 2-bit SJW, 4-bit TSEG1, 3-bit TSEG2). */
static struct can_bittiming_const cc770_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
+
+static inline int intid2obj(unsigned int intid)
+{
+       if (intid == 2)
+               return 0;
+       else
+               return MSGOBJ_LAST + 2 - intid;
+}
+
/*
 * Configure and enable every message object managed by the driver.
 *
 * RX objects get their frame type (SFF/EFF) and direction (data/RTR)
 * programmed and RX interrupts enabled; the TX object is cleared and left
 * invalid until cc770_start_xmit() loads a frame. When the CC770's
 * additional functions (CTRL_EAF) are active, a single object handles all
 * RX frame types, so the extra RX objects are skipped.
 */
static void enable_all_objs(const struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);
	u8 msgcfg;
	unsigned char obj_flags;
	unsigned int o, mo;

	for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
		obj_flags = priv->obj_flags[o];
		mo = obj2msgobj(o);

		if (obj_flags & CC770_OBJ_FLAG_RX) {
			/*
			 * We don't need extra objects for RTR and EFF if
			 * the additional CC770 functions are enabled.
			 */
			if (priv->control_normal_mode & CTRL_EAF) {
				if (o > 0)
					continue;
				netdev_dbg(dev, "Message object %d for "
					   "RX data, RTR, SFF and EFF\n", mo);
			} else {
				netdev_dbg(dev,
					   "Message object %d for RX %s %s\n",
					   mo, obj_flags & CC770_OBJ_FLAG_RTR ?
					   "RTR" : "data",
					   obj_flags & CC770_OBJ_FLAG_EFF ?
					   "EFF" : "SFF");
			}

			/* Frame type and direction for this object */
			if (obj_flags & CC770_OBJ_FLAG_EFF)
				msgcfg = MSGCFG_XTD;
			else
				msgcfg = 0;
			if (obj_flags & CC770_OBJ_FLAG_RTR)
				msgcfg |= MSGCFG_DIR;

			cc770_write_reg(priv, msgobj[mo].config, msgcfg);
			/* Validate the object and enable its RX interrupt */
			cc770_write_reg(priv, msgobj[mo].ctrl0,
					MSGVAL_SET | TXIE_RES |
					RXIE_SET | INTPND_RES);

			/* RTR objects need CPUUPD set so the chip does not
			 * auto-answer the remote request itself */
			if (obj_flags & CC770_OBJ_FLAG_RTR)
				cc770_write_reg(priv, msgobj[mo].ctrl1,
						NEWDAT_RES | CPUUPD_SET |
						TXRQST_RES | RMTPND_RES);
			else
				cc770_write_reg(priv, msgobj[mo].ctrl1,
						NEWDAT_RES | MSGLST_RES |
						TXRQST_RES | RMTPND_RES);
		} else {
			netdev_dbg(dev, "Message object %d for "
				   "TX data, RTR, SFF and EFF\n", mo);

			/* Clear the TX object; it stays invalid (MSGVAL_RES)
			 * until a frame is queued for transmission */
			cc770_write_reg(priv, msgobj[mo].ctrl1,
					RMTPND_RES | TXRQST_RES |
					CPUUPD_RES | NEWDAT_RES);
			cc770_write_reg(priv, msgobj[mo].ctrl0,
					MSGVAL_RES | TXIE_RES |
					RXIE_RES | INTPND_RES);
		}
	}
}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+       int o, mo;
+
+       for (o = 0; o <  ARRAY_SIZE(priv->obj_flags); o++) {
+               mo = obj2msgobj(o);
+
+               if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+                       if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+                               continue;
+
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       NEWDAT_RES | MSGLST_RES |
+                                       TXRQST_RES | RMTPND_RES);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_RES | TXIE_RES |
+                                       RXIE_RES | INTPND_RES);
+               } else {
+                       /* Clear message object for send */
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       RMTPND_RES | TXRQST_RES |
+                                       CPUUPD_RES | NEWDAT_RES);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_RES | TXIE_RES |
+                                       RXIE_RES | INTPND_RES);
+               }
+       }
+}
+
/*
 * Put the controller into configuration/reset mode: the chip is taken off
 * the bus, interrupts are disabled, pending interrupt and status state is
 * cleared and all used message objects are invalidated.
 */
static void set_reset_mode(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);

	/* Enable configuration and puts chip in bus-off, disable interrupts */
	cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);

	priv->can.state = CAN_STATE_STOPPED;

	/* Clear interrupts (reading the interrupt register acks them) */
	cc770_read_reg(priv, interrupt);

	/* Clear status register */
	cc770_write_reg(priv, status, 0);

	/* Disable all used message objects */
	disable_all_objs(priv);
}
+
/*
 * Leave reset mode and (re)join the bus: clear stale interrupt/status
 * state, re-enable the message objects and write the operating control
 * word prepared by register_cc770dev().
 */
static void set_normal_mode(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);

	/* Clear interrupts (reading the interrupt register acks them) */
	cc770_read_reg(priv, interrupt);

	/* Clear status register and pre-set last error code */
	cc770_write_reg(priv, status, STAT_LEC_MASK);

	/* Enable all used message objects*/
	enable_all_objs(dev);

	/*
	 * Clear bus-off, interrupts only for errors,
	 * not for status change
	 */
	cc770_write_reg(priv, control, priv->control_normal_mode);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+
/*
 * One-time hardware initialization: program the board-specific clock/bus
 * configuration, wipe all 15 message objects and open the global
 * acceptance masks. The chip is left in configuration mode (bus-off).
 */
static void chipset_init(struct cc770_priv *priv)
{
	int mo, id, data;

	/* Enable configuration and put chip in bus-off, disable interrupts */
	cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));

	/* Set CLKOUT divider and slew rates */
	cc770_write_reg(priv, clkout, priv->clkout);

	/* Configure CPU interface / CLKOUT enable */
	cc770_write_reg(priv, cpu_interface, priv->cpu_interface);

	/* Set bus configuration  */
	cc770_write_reg(priv, bus_config, priv->bus_config);

	/* Clear interrupts (reading the interrupt register acks them) */
	cc770_read_reg(priv, interrupt);

	/* Clear status register */
	cc770_write_reg(priv, status, 0);

	/* Clear and invalidate message objects */
	for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
		/* INTPND_UNC first, then INTPND_RES, to reliably clear a
		 * possibly pending interrupt on this object */
		cc770_write_reg(priv, msgobj[mo].ctrl0,
				INTPND_UNC | RXIE_RES |
				TXIE_RES | MSGVAL_RES);
		cc770_write_reg(priv, msgobj[mo].ctrl0,
				INTPND_RES | RXIE_RES |
				TXIE_RES | MSGVAL_RES);
		cc770_write_reg(priv, msgobj[mo].ctrl1,
				NEWDAT_RES | MSGLST_RES |
				TXRQST_RES | RMTPND_RES);
		for (data = 0; data < 8; data++)
			cc770_write_reg(priv, msgobj[mo].data[data], 0);
		for (id = 0; id < 4; id++)
			cc770_write_reg(priv, msgobj[mo].id[id], 0);
		cc770_write_reg(priv, msgobj[mo].config, 0);
	}

	/* Set all global ID masks to "don't care" */
	cc770_write_reg(priv, global_mask_std[0], 0);
	cc770_write_reg(priv, global_mask_std[1], 0);
	cc770_write_reg(priv, global_mask_ext[0], 0);
	cc770_write_reg(priv, global_mask_ext[1], 0);
	cc770_write_reg(priv, global_mask_ext[2], 0);
	cc770_write_reg(priv, global_mask_ext[3], 0);

}
+
/*
 * Probe for a CC770/AN82527 at priv->reg_base.
 *
 * Checks that the chip is out of hardware reset, verifies register access
 * with a write/read-back pattern and records (in control_normal_mode)
 * whether the chip supports the CC770's additional functions (CTRL_EAF).
 *
 * Returns 0 on success or -ENODEV if no chip responds.
 */
static int cc770_probe_chip(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);

	/* Enable configuration, put chip in bus-off, disable ints */
	cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
	/* Configure cpu interface / CLKOUT disable */
	cc770_write_reg(priv, cpu_interface, priv->cpu_interface);

	/*
	 * Check if hardware reset is still inactive or maybe there
	 * is no chip in this address space
	 */
	if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
		netdev_info(dev, "probing @0x%p failed (reset)\n",
			    priv->reg_base);
		return -ENODEV;
	}

	/* Write and read back test pattern (some arbitrary values) */
	cc770_write_reg(priv, msgobj[1].data[1], 0x25);
	cc770_write_reg(priv, msgobj[2].data[3], 0x52);
	cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
	if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
	    (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
	    (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
		netdev_info(dev, "probing @0x%p failed (pattern)\n",
			    priv->reg_base);
		return -ENODEV;
	}

	/* Check if this chip is a CC770 supporting additional functions */
	if (cc770_read_reg(priv, control) & CTRL_EAF)
		priv->control_normal_mode |= CTRL_EAF;

	return 0;
}
+
/*
 * (Re)start the controller: go through reset mode to reach a known state,
 * then switch to normal operating mode.
 */
static void cc770_start(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);

	/* enter reset mode first if the chip is not already stopped */
	if (priv->can.state != CAN_STATE_STOPPED)
		set_reset_mode(dev);

	/* leave reset mode */
	set_normal_mode(dev);
}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               cc770_start(dev);
+               netif_wake_queue(dev);
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
/*
 * CAN core do_set_bittiming() callback: pack the computed bit timing into
 * the chip's two bit-timing registers.
 *
 * BTR0: bits 5..0 = BRP-1, bits 7..6 = SJW-1.
 * BTR1: bits 3..0 = TSEG1-1 (prop_seg + phase_seg1), bits 6..4 = TSEG2-1,
 *       bit 7 = triple sampling.
 */
static int cc770_set_bittiming(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
		(((bt->phase_seg2 - 1) & 0x7) << 4);
	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		btr1 |= 0x80;

	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

	cc770_write_reg(priv, bit_timing_0, btr0);
	cc770_write_reg(priv, bit_timing_1, btr1);

	return 0;
}
+
/*
 * CAN core do_get_berr_counter() callback: report the chip's TX/RX error
 * counters. Only hooked up for real CC770s (see register_cc770dev()); the
 * plain 82527 does not expose readable error counters.
 */
static int cc770_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct cc770_priv *priv = netdev_priv(dev);

	bec->txerr = cc770_read_reg(priv, tx_error_counter);
	bec->rxerr = cc770_read_reg(priv, rx_error_counter);

	return 0;
}
+
/*
 * Transmit one CAN frame via the dedicated TX message object.
 *
 * The queue is stopped after accepting a frame, so only a single frame is
 * in flight; cc770_tx_interrupt() wakes the queue again on completion.
 * Identifier, configuration and data are loaded into the object, then
 * TXRQST is set to start transmission.
 */
static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
	u8 dlc, rtr;
	u32 id;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Should not happen with a stopped queue; bail out defensively */
	if ((cc770_read_reg(priv,
			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
		netdev_err(dev, "TX register is still occupied!\n");
		return NETDEV_TX_BUSY;
	}

	netif_stop_queue(dev);

	dlc = cf->can_dlc;
	id = cf->can_id;
	/* MSGCFG_DIR means "transmit data"; cleared for remote requests */
	if (cf->can_id & CAN_RTR_FLAG)
		rtr = 0;
	else
		rtr = MSGCFG_DIR;
	/* CPUUPD blocks transmission while the object is being loaded */
	cc770_write_reg(priv, msgobj[mo].ctrl1,
			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
	cc770_write_reg(priv, msgobj[mo].ctrl0,
			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
	if (id & CAN_EFF_FLAG) {
		/* 29-bit identifier, left-aligned across the 4 ID registers */
		id &= CAN_EFF_MASK;
		cc770_write_reg(priv, msgobj[mo].config,
				(dlc << 4) | rtr | MSGCFG_XTD);
		cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
		cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
		cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
		cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
	} else {
		/* 11-bit identifier, left-aligned across 2 ID registers */
		id &= CAN_SFF_MASK;
		cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
		cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
		cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
	}

	for (i = 0; i < dlc; i++)
		cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);

	/* Release CPUUPD and request transmission */
	cc770_write_reg(priv, msgobj[mo].ctrl1,
			RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);

	/* NOTE(review): tx_bytes is counted at submission, not completion;
	 * tx_packets is bumped in cc770_tx_interrupt() instead. */
	stats->tx_bytes += dlc;

	can_put_echo_skb(skb, dev, 0);

	/*
	 * HM: We had some cases of repeated IRQs so make sure the
	 * INT is acknowledged I know it's already further up, but
	 * doing again fixed the issue
	 */
	cc770_write_reg(priv, msgobj[mo].ctrl0,
			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);

	return NETDEV_TX_OK;
}
+
/*
 * Read one received frame out of message object @mo and hand it to the
 * network stack. @ctrl1 is the object's ctrl1 value as sampled by the
 * caller (used to detect a pending remote request).
 */
static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u8 config;
	u32 id;
	int i;

	skb = alloc_can_skb(dev, &cf);
	if (!skb)
		return;

	config = cc770_read_reg(priv, msgobj[mo].config);

	if (ctrl1 & RMTPND_SET) {
		/*
		 * Unfortunately, the chip does not store the real message
		 * identifier of the received remote transmission request
		 * frame. Therefore we set it to 0.
		 */
		cf->can_id = CAN_RTR_FLAG;
		if (config & MSGCFG_XTD)
			cf->can_id |= CAN_EFF_FLAG;
		cf->can_dlc = 0;
	} else {
		if (config & MSGCFG_XTD) {
			/* Reassemble the left-aligned 29-bit identifier */
			id = cc770_read_reg(priv, msgobj[mo].id[3]);
			id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
			id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
			id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
			id >>= 3;
			id |= CAN_EFF_FLAG;
		} else {
			/* Reassemble the left-aligned 11-bit identifier */
			id = cc770_read_reg(priv, msgobj[mo].id[1]);
			id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
			id >>= 5;
		}

		cf->can_id = id;
		/* DLC lives in the upper nibble of the config register */
		cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
		for (i = 0; i < cf->can_dlc; i++)
			cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
	}
	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
+
/*
 * Translate a status-register snapshot into a CAN error frame and update
 * the device state (bus-off / error-warning / error-passive / active).
 *
 * Returns 0 on success or -ENOMEM if no error skb could be allocated.
 */
static int cc770_err(struct net_device *dev, u8 status)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u8 lec;

	netdev_dbg(dev, "status interrupt (%#x)\n", status);

	skb = alloc_can_err_skb(dev, &cf);
	if (!skb)
		return -ENOMEM;

	/* Use extended functions of the CC770 */
	if (priv->control_normal_mode & CTRL_EAF) {
		cf->data[6] = cc770_read_reg(priv, tx_error_counter);
		cf->data[7] = cc770_read_reg(priv, rx_error_counter);
	}

	if (status & STAT_BOFF) {
		/* Disable interrupts */
		cc770_write_reg(priv, control, CTRL_INI);
		cf->can_id |= CAN_ERR_BUSOFF;
		priv->can.state = CAN_STATE_BUS_OFF;
		can_bus_off(dev);
	} else if (status & STAT_WARN) {
		cf->can_id |= CAN_ERR_CRTL;
		/* Only the CC770 does show error passive; on the 82527
		 * data[7] stays 0 and this branch reports warning level */
		if (cf->data[7] > 127) {
			cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
				CAN_ERR_CRTL_TX_PASSIVE;
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
			priv->can.can_stats.error_passive++;
		} else {
			cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
				CAN_ERR_CRTL_TX_WARNING;
			priv->can.state = CAN_STATE_ERROR_WARNING;
			priv->can.can_stats.error_warning++;
		}
	} else {
		/* Back to error active */
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* Map the last error code (LEC) to a protocol error, if any */
	lec = status & STAT_LEC_MASK;
	if (lec < 7 && lec > 0) {
		if (lec == STAT_LEC_ACK) {
			cf->can_id |= CAN_ERR_ACK;
		} else {
			cf->can_id |= CAN_ERR_PROT;
			switch (lec) {
			case STAT_LEC_STUFF:
				cf->data[2] |= CAN_ERR_PROT_STUFF;
				break;
			case STAT_LEC_FORM:
				cf->data[2] |= CAN_ERR_PROT_FORM;
				break;
			case STAT_LEC_BIT1:
				cf->data[2] |= CAN_ERR_PROT_BIT1;
				break;
			case STAT_LEC_BIT0:
				cf->data[2] |= CAN_ERR_PROT_BIT0;
				break;
			case STAT_LEC_CRC:
				cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
				break;
			}
		}
	}

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 0;
}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       u8 status;
+
+       status = cc770_read_reg(priv, status);
+       /* Reset the status register including RXOK and TXOK */
+       cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+       if (status & (STAT_WARN | STAT_BOFF) ||
+           (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+               cc770_err(dev, status);
+               return status & STAT_BOFF;
+       }
+
+       return 0;
+}
+
/*
 * Service an RX interrupt on logical object @o: drain frames from the
 * message object until neither new data nor a pending interrupt remains,
 * bounded by CC770_MAX_MSG iterations to keep the ISR finite.
 */
static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int mo = obj2msgobj(o);
	u8 ctrl1;
	int n = CC770_MAX_MSG;

	while (n--) {
		ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);

		if (!(ctrl1 & NEWDAT_SET))  {
			/* Check for RTR if additional functions are enabled */
			if (priv->control_normal_mode & CTRL_EAF) {
				if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
				      INTPND_SET))
					break;
			} else {
				break;
			}
		}

		/* MSGLST means the shadow buffer overwrote an unread frame */
		if (ctrl1 & MSGLST_SET) {
			stats->rx_over_errors++;
			stats->rx_errors++;
		}
		/* Object 15 (the last one) is double-buffered and must not
		 * be released before the frame has been read out */
		if (mo < MSGOBJ_LAST)
			cc770_write_reg(priv, msgobj[mo].ctrl1,
					NEWDAT_RES | MSGLST_RES |
					TXRQST_UNC | RMTPND_UNC);
		cc770_rx(dev, mo, ctrl1);

		/* Re-arm the object for the next reception */
		cc770_write_reg(priv, msgobj[mo].ctrl0,
				MSGVAL_SET | TXIE_RES |
				RXIE_SET | INTPND_RES);
		cc770_write_reg(priv, msgobj[mo].ctrl1,
				NEWDAT_RES | MSGLST_RES |
				TXRQST_RES | RMTPND_RES);
	}
}
+
/*
 * Service an interrupt on an RTR message object @o: deliver pending remote
 * transmission requests until the object's interrupt-pending flag clears,
 * bounded by CC770_MAX_MSG iterations.
 */
static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
{
	struct cc770_priv *priv = netdev_priv(dev);
	unsigned int mo = obj2msgobj(o);
	u8 ctrl0, ctrl1;
	int n = CC770_MAX_MSG;

	while (n--) {
		ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
		if (!(ctrl0 & INTPND_SET))
			break;

		ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
		cc770_rx(dev, mo, ctrl1);

		/* Re-arm: keep CPUUPD set so the chip does not auto-answer
		 * the remote request itself */
		cc770_write_reg(priv, msgobj[mo].ctrl0,
				MSGVAL_SET | TXIE_RES |
				RXIE_SET | INTPND_RES);
		cc770_write_reg(priv, msgobj[mo].ctrl1,
				NEWDAT_RES | CPUUPD_SET |
				TXRQST_RES | RMTPND_RES);
	}
}
+
/*
 * Service a TX-complete interrupt on logical object @o: quiesce the TX
 * object, account the transmitted packet, loop back the echo skb and
 * restart the (single-frame) TX queue.
 */
static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int mo = obj2msgobj(o);

	/* Nothing more to send, switch off interrupts */
	cc770_write_reg(priv, msgobj[mo].ctrl0,
			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
	/*
	 * We had some cases of repeated IRQ so make sure the
	 * INT is acknowledged
	 */
	cc770_write_reg(priv, msgobj[mo].ctrl0,
			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);

	stats->tx_packets++;
	can_get_echo_skb(dev, 0);
	netif_wake_queue(dev);
}
+
/*
 * Top-level (possibly shared) interrupt handler.
 *
 * Drains the interrupt-ID register: ID 1 is a status-change interrupt,
 * any other non-zero ID identifies a message object, dispatched to the
 * RTR/RX/TX sub-handler by the object's configured role. Bounded by
 * CC770_MAX_IRQ iterations. Board-specific pre_irq()/post_irq() hooks
 * bracket the processing when provided.
 *
 * Returns IRQ_HANDLED if at least one interrupt source was serviced,
 * IRQ_NONE otherwise (e.g. a shared IRQ that was not ours).
 */
irqreturn_t cc770_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct cc770_priv *priv = netdev_priv(dev);
	u8 intid;
	int o, n = 0;

	/* Shared interrupts and IRQ off? */
	if (priv->can.state == CAN_STATE_STOPPED)
		return IRQ_NONE;

	if (priv->pre_irq)
		priv->pre_irq(priv);

	while (n < CC770_MAX_IRQ) {
		/* Read the highest pending interrupt request */
		intid = cc770_read_reg(priv, interrupt);
		if (!intid)
			break;
		n++;

		if (intid == 1) {
			/* Exit in case of bus-off */
			if (cc770_status_interrupt(dev))
				break;
		} else {
			o = intid2obj(intid);

			if (o >= CC770_OBJ_MAX) {
				netdev_err(dev, "Unexpected interrupt id %d\n",
					   intid);
				continue;
			}

			/* Dispatch by the object's configured role */
			if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
				cc770_rtr_interrupt(dev, o);
			else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
				cc770_rx_interrupt(dev, o);
			else
				cc770_tx_interrupt(dev, o);
		}
	}

	if (priv->post_irq)
		priv->post_irq(priv);

	if (n >= CC770_MAX_IRQ)
		netdev_dbg(dev, "%d messages handled in ISR", n);

	return (n) ? IRQ_HANDLED : IRQ_NONE;
}
+
+static int cc770_open(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       int err;
+
+       /* set chip into reset mode */
+       set_reset_mode(dev);
+
+       /* common open */
+       err = open_candev(dev);
+       if (err)
+               return err;
+
+       err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+                         dev->name, dev);
+       if (err) {
+               close_candev(dev);
+               return -EAGAIN;
+       }
+
+       /* init and start chip */
+       cc770_start(dev);
+
+       netif_start_queue(dev);
+
+       return 0;
+}
+
/*
 * ndo_stop callback: stop the queue, put the chip back into reset mode
 * and release the IRQ and common CAN-device state. Always returns 0.
 */
static int cc770_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	set_reset_mode(dev);

	free_irq(dev->irq, dev);
	close_candev(dev);

	return 0;
}
+
/*
 * Allocate a CAN netdevice with cc770 private data plus @sizeof_priv
 * extra bytes of bus-driver private storage (reachable via priv->priv).
 *
 * Returns the new net_device or NULL on allocation failure; the caller
 * must release it with free_cc770dev().
 */
struct net_device *alloc_cc770dev(int sizeof_priv)
{
	struct net_device *dev;
	struct cc770_priv *priv;

	dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
			   CC770_ECHO_SKB_MAX);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->can.bittiming_const = &cc770_bittiming_const;
	priv->can.do_set_bittiming = cc770_set_bittiming;
	priv->can.do_set_mode = cc770_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	/* Start from the module-level defaults (possibly adjusted by the
	 * msgobj15_eff parameter in cc770_init()) */
	memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));

	/* Bus-driver private area lives directly after cc770_priv */
	if (sizeof_priv)
		priv->priv = (void *)priv + sizeof(struct cc770_priv);

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
/* Counterpart of alloc_cc770dev(): release the CAN netdevice. */
void free_cc770dev(struct net_device *dev)
{
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_cc770dev);
+
/* Netdevice callbacks: ifup, ifdown and frame transmission. */
static const struct net_device_ops cc770_netdev_ops = {
	.ndo_open = cc770_open,
	.ndo_stop = cc770_close,
	.ndo_start_xmit = cc770_start_xmit,
};
+
/*
 * Probe the chip, select the operating mode (CC770 additional functions
 * vs. strict 82527 compatibility), initialize the hardware and register
 * the CAN netdevice.
 *
 * Returns 0 on success or a negative errno from the probe/registration.
 */
int register_cc770dev(struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);
	int err;

	err = cc770_probe_chip(dev);
	if (err)
		return err;

	dev->netdev_ops = &cc770_netdev_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	/* Should we use additional functions? (only if the chip supports
	 * them, per cc770_probe_chip(), and i82527_compat is not set) */
	if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
		/* Error counters are only readable with the extra functions */
		priv->can.do_get_berr_counter = cc770_get_berr_counter;
		priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
		netdev_dbg(dev, "i82527 mode with additional functions\n");
	} else {
		priv->control_normal_mode = CTRL_IE | CTRL_EIE;
		netdev_dbg(dev, "strict i82527 compatibility mode\n");
	}

	chipset_init(priv);
	set_reset_mode(dev);

	return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_cc770dev);
+
/* Stop the chip and unregister the CAN netdevice (counterpart of
 * register_cc770dev()); does not free the device itself. */
void unregister_cc770dev(struct net_device *dev)
{
	set_reset_mode(dev);
	unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
/*
 * Module init: apply the msgobj15_eff parameter by moving the EFF flag
 * from RX1 to RX0 (object 15, the double-buffered one) so extended frames
 * get the reliable shadow-register object. Always succeeds.
 */
static __init int cc770_init(void)
{
	if (msgobj15_eff) {
		cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
		cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
	}

	pr_info("CAN netdevice driver\n");

	return 0;
}
module_init(cc770_init);
+
/* Module exit: nothing to tear down, just log the removal. */
static __exit void cc770_exit(void)
{
	pr_info("driver removed\n");
}
module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644 (file)
index 0000000..a1739db
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+       u8 ctrl0;
+       u8 ctrl1;
+       u8 id[4];
+       u8 config;
+       u8 data[8];
+       u8 dontuse;             /* padding */
+} __packed;
+
+struct cc770_regs {
+       union {
+               struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+               struct {
+                       u8 control;             /* Control Register */
+                       u8 status;              /* Status Register */
+                       u8 cpu_interface;       /* CPU Interface Register */
+                       u8 dontuse1;
+                       u8 high_speed_read[2];  /* High Speed Read */
+                       u8 global_mask_std[2];  /* Standard Global Mask */
+                       u8 global_mask_ext[4];  /* Extended Global Mask */
+                       u8 msg15_mask[4];       /* Message 15 Mask */
+                       u8 dontuse2[15];
+                       u8 clkout;              /* Clock Out Register */
+                       u8 dontuse3[15];
+                       u8 bus_config;          /* Bus Configuration Register */
+                       u8 dontuse4[15];
+                       u8 bit_timing_0;        /* Bit Timing Register byte 0 */
+                       u8 dontuse5[15];
+                       u8 bit_timing_1;        /* Bit Timing Register byte 1 */
+                       u8 dontuse6[15];
+                       u8 interrupt;           /* Interrupt Register */
+                       u8 dontuse7[15];
+                       u8 rx_error_counter;    /* Receive Error Counter */
+                       u8 dontuse8[15];
+                       u8 tx_error_counter;    /* Transmit Error Counter */
+                       u8 dontuse9[31];
+                       u8 p1_conf;
+                       u8 dontuse10[15];
+                       u8 p2_conf;
+                       u8 dontuse11[15];
+                       u8 p1_in;
+                       u8 dontuse12[15];
+                       u8 p2_in;
+                       u8 dontuse13[15];
+                       u8 p1_out;
+                       u8 dontuse14[15];
+                       u8 p2_out;
+                       u8 dontuse15[15];
+                       u8 serial_reset_addr;
+               };
+       };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI       0x01    /* Initialization */
+#define CTRL_IE                0x02    /* Interrupt Enable */
+#define CTRL_SIE       0x04    /* Status Interrupt Enable */
+#define CTRL_EIE       0x08    /* Error Interrupt Enable */
+#define CTRL_EAF       0x20    /* Enable additional functions */
+#define CTRL_CCE       0x40    /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01    /* Stuff error */
+#define STAT_LEC_FORM  0x02    /* Form error */
+#define STAT_LEC_ACK   0x03    /* Acknowledgement error */
+#define STAT_LEC_BIT1  0x04    /* Bit1 error */
+#define STAT_LEC_BIT0  0x05    /* Bit0 error */
+#define STAT_LEC_CRC   0x06    /* CRC error */
+#define STAT_LEC_MASK  0x07    /* Last Error Code mask */
+#define STAT_TXOK      0x08    /* Transmit Message Successfully */
+#define STAT_RXOK      0x10    /* Receive Message Successfully */
+#define STAT_WAKE      0x20    /* Wake Up Status */
+#define STAT_WARN      0x40    /* Warning Status */
+#define STAT_BOFF      0x80    /* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES     0x01    /* No Interrupt pending */
+#define INTPND_SET     0x02    /* Interrupt pending */
+#define INTPND_UNC     0x03
+#define RXIE_RES       0x04    /* Receive Interrupt Disable */
+#define RXIE_SET       0x08    /* Receive Interrupt Enable */
+#define RXIE_UNC       0x0c
+#define TXIE_RES       0x10    /* Transmit Interrupt Disable */
+#define TXIE_SET       0x20    /* Transmit Interrupt Enable */
+#define TXIE_UNC       0x30
+#define MSGVAL_RES     0x40    /* Message Invalid */
+#define MSGVAL_SET     0x80    /* Message Valid */
+#define MSGVAL_UNC     0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES     0x01    /* No New Data */
+#define NEWDAT_SET     0x02    /* New Data */
+#define NEWDAT_UNC     0x03
+#define MSGLST_RES     0x04    /* No Message Lost */
+#define MSGLST_SET     0x08    /* Message Lost */
+#define MSGLST_UNC     0x0c
+#define CPUUPD_RES     0x04    /* No CPU Updating */
+#define CPUUPD_SET     0x08    /* CPU Updating */
+#define CPUUPD_UNC     0x0c
+#define TXRQST_RES     0x10    /* No Transmission Request */
+#define TXRQST_SET     0x20    /* Transmission Request */
+#define TXRQST_UNC     0x30
+#define RMTPND_RES     0x40    /* No Remote Request Pending */
+#define RMTPND_SET     0x80    /* Remote Request Pending */
+#define RMTPND_UNC     0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD     0x04    /* Extended Identifier */
+#define MSGCFG_DIR     0x08    /* Direction is Transmit */
+
+#define MSGOBJ_FIRST   1
+#define MSGOBJ_LAST    15
+
+#define CC770_IO_SIZE  0x100
+#define CC770_MAX_IRQ  20      /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG  4       /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX     1
+
+#define cc770_read_reg(priv, member)                                   \
+       priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value)                           \
+       priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX      0x01
+#define CC770_OBJ_FLAG_RTR     0x02
+#define CC770_OBJ_FLAG_EFF     0x04
+
+enum {
+       CC770_OBJ_RX0 = 0,      /* for receiving normal messages */
+       CC770_OBJ_RX1,          /* for receiving normal messages */
+       CC770_OBJ_RX_RTR0,      /* for receiving remote transmission requests */
+       CC770_OBJ_RX_RTR1,      /* for receiving remote transmission requests */
+       CC770_OBJ_TX,           /* for sending messages */
+       CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o)  (MSGOBJ_LAST - (o)) /* message object 11..15 */
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+       struct can_priv can;    /* must be the first member */
+       struct sk_buff *echo_skb;       /* skb kept for local echo (IFF_ECHO) */
+
+       /* the lower-layer is responsible for appropriate locking */
+       u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+       void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+       void (*pre_irq)(const struct cc770_priv *priv);
+       void (*post_irq)(const struct cc770_priv *priv);
+
+       void *priv;             /* for board-specific data */
+       struct net_device *dev; /* associated net_device */
+
+       void __iomem *reg_base;  /* ioremap'ed address to registers */
+       unsigned long irq_flags; /* for request_irq() */
+
+       unsigned char obj_flags[CC770_OBJ_MAX]; /* CC770_OBJ_FLAG_* per object */
+       u8 control_normal_mode; /* Control register for normal mode */
+       u8 cpu_interface;       /* CPU interface register */
+       u8 clkout;              /* Clock out register */
+       u8 bus_config;          /* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644 (file)
index 0000000..4be5fe2
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameter can be defined:
+ *
+ *   clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ *   cir: CPU interface register (default=0x40 [DSC])
+ *   bcr: Bus configuration register (default=0x40 [CBY])
+ *   cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT    16000000        /* 16 MHz */
+#define COR_DEFAULT    0x00
+#define BCR_DEFAULT    BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+                "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE          0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+/* Read a controller register via memory-mapped I/O. */
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return readb(priv->reg_base + reg);
+}
+
+/* Write a controller register via memory-mapped I/O. */
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+                                     int reg, u8 val)
+{
+       writeb(val, priv->reg_base + reg);
+}
+
+/* Read a controller register via direct I/O port access. */
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write a controller register via direct I/O port access. */
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+                                      int reg, u8 val)
+{
+       outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/*
+ * Indirect port access: write the register number to the address port
+ * (base), then read the value from the data port (base + 1).  Callers
+ * must serialize the two accesses (see the locking note in cc770.h).
+ */
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+                                            int reg)
+{
+       unsigned long base = (unsigned long)priv->reg_base;
+
+       outb(reg, base);
+       return inb(base + 1);
+}
+
+/*
+ * Indirect port access: write the register number to the address port
+ * (base), then the value to the data port (base + 1).  Callers must
+ * serialize the two accesses (see the locking note in cc770.h).
+ */
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+                                               int reg, u8 val)
+{
+       unsigned long base = (unsigned long)priv->reg_base;
+
+       outb(reg, base);
+       outb(val, base + 1);
+}
+
+/*
+ * Probe one module-parameter configured ISA device: claim its I/O or
+ * memory region, allocate and configure the CAN netdevice, and register
+ * it.  Per-device module parameters fall back to index 0's value, then
+ * to the compile-time default.
+ *
+ * Fix: on register_cc770dev() failure the allocated net_device was
+ * leaked; a new exit_free label now releases it before unwinding.
+ */
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct cc770_priv *priv;
+       void __iomem *base = NULL;
+       int iosize = CC770_IOSIZE;
+       int idx = pdev->id;
+       int err;
+       u32 clktmp;
+
+       dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+               idx, port[idx], mem[idx], irq[idx]);
+       if (mem[idx]) {
+               /* Memory-mapped access */
+               if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+                       err = -EBUSY;
+                       goto exit;
+               }
+               base = ioremap_nocache(mem[idx], iosize);
+               if (!base) {
+                       err = -ENOMEM;
+                       goto exit_release;
+               }
+       } else {
+               /* I/O port access, optionally indirect via addr/data pair */
+               if (indirect[idx] > 0 ||
+                   (indirect[idx] == -1 && indirect[0] > 0))
+                       iosize = CC770_IOSIZE_INDIRECT;
+               if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+                       err = -EBUSY;
+                       goto exit;
+               }
+       }
+
+       dev = alloc_cc770dev(0);
+       if (!dev) {
+               err = -ENOMEM;
+               goto exit_unmap;
+       }
+       priv = netdev_priv(dev);
+
+       dev->irq = irq[idx];
+       priv->irq_flags = IRQF_SHARED;
+       if (mem[idx]) {
+               priv->reg_base = base;
+               dev->base_addr = mem[idx];
+               priv->read_reg = cc770_isa_mem_read_reg;
+               priv->write_reg = cc770_isa_mem_write_reg;
+       } else {
+               priv->reg_base = (void __iomem *)port[idx];
+               dev->base_addr = port[idx];
+
+               if (iosize == CC770_IOSIZE_INDIRECT) {
+                       priv->read_reg = cc770_isa_port_read_reg_indirect;
+                       priv->write_reg = cc770_isa_port_write_reg_indirect;
+               } else {
+                       priv->read_reg = cc770_isa_port_read_reg;
+                       priv->write_reg = cc770_isa_port_write_reg;
+               }
+       }
+
+       /* External clock: per-device value, else first value, else default */
+       if (clk[idx])
+               clktmp = clk[idx];
+       else if (clk[0])
+               clktmp = clk[0];
+       else
+               clktmp = CLK_DEFAULT;
+       priv->can.clock.freq = clktmp;
+
+       if (cir[idx] != 0xff) {
+               priv->cpu_interface = cir[idx];
+       } else if (cir[0] != 0xff) {
+               priv->cpu_interface = cir[0];
+       } else {
+               /* The system clock may not exceed 10 MHz */
+               if (clktmp > 10000000) {
+                       priv->cpu_interface |= CPUIF_DSC;
+                       clktmp /= 2;
+               }
+               /* The memory clock may not exceed 8 MHz */
+               if (clktmp > 8000000)
+                       priv->cpu_interface |= CPUIF_DMC;
+       }
+
+       if (priv->cpu_interface & CPUIF_DSC)
+               priv->can.clock.freq /= 2;
+
+       if (bcr[idx] != 0xff)
+               priv->bus_config = bcr[idx];
+       else if (bcr[0] != 0xff)
+               priv->bus_config = bcr[0];
+       else
+               priv->bus_config = BCR_DEFAULT;
+
+       if (cor[idx] != 0xff)
+               priv->clkout = cor[idx];
+       else if (cor[0] != 0xff)
+               priv->clkout = cor[0];
+       else
+               priv->clkout = COR_DEFAULT;
+
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = register_cc770dev(dev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "couldn't register device (err=%d)\n", err);
+               goto exit_free;
+       }
+
+       dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+                priv->reg_base, dev->irq);
+       return 0;
+
+ exit_free:
+       free_cc770dev(dev);     /* fix: don't leak the netdevice on failure */
+ exit_unmap:
+       if (mem[idx])
+               iounmap(base);
+ exit_release:
+       if (mem[idx])
+               release_mem_region(mem[idx], iosize);
+       else
+               release_region(port[idx], iosize);
+ exit:
+       return err;
+}
+
+/* Undo cc770_isa_probe(): unregister the device and release resources. */
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct cc770_priv *priv = netdev_priv(dev);
+       int idx = pdev->id;
+
+       unregister_cc770dev(dev);
+       dev_set_drvdata(&pdev->dev, NULL);
+
+       if (mem[idx]) {
+               iounmap(priv->reg_base);
+               release_mem_region(mem[idx], CC770_IOSIZE);
+       } else {
+               /* Indirect access claimed the smaller 2-byte I/O region */
+               if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+                       release_region(port[idx], CC770_IOSIZE_INDIRECT);
+               else
+                       release_region(port[idx], CC770_IOSIZE);
+       }
+       free_cc770dev(dev);
+
+       return 0;
+}
+
+/* Platform driver bound to the devices created in cc770_isa_init() */
+static struct platform_driver cc770_isa_driver = {
+       .probe = cc770_isa_probe,
+       .remove = __devexit_p(cc770_isa_remove),
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+};
+
+/*
+ * Create one platform device per slot that has an I/O port or memory
+ * address plus an IRQ configured, then register the platform driver.
+ * A slot with an address but no IRQ — or an entirely empty slot 0 —
+ * is treated as a parameter error.
+ */
+static int __init cc770_isa_init(void)
+{
+       int idx, err;
+
+       for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+               if ((port[idx] || mem[idx]) && irq[idx]) {
+                       cc770_isa_devs[idx] =
+                               platform_device_alloc(KBUILD_MODNAME, idx);
+                       if (!cc770_isa_devs[idx]) {
+                               err = -ENOMEM;
+                               goto exit_free_devices;
+                       }
+                       err = platform_device_add(cc770_isa_devs[idx]);
+                       if (err) {
+                               /* drop the reference of the un-added device */
+                               platform_device_put(cc770_isa_devs[idx]);
+                               goto exit_free_devices;
+                       }
+                       pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+                                "irq=%d\n",
+                                idx, port[idx], mem[idx], irq[idx]);
+               } else if (idx == 0 || port[idx] || mem[idx]) {
+                       pr_err("insufficient parameters supplied\n");
+                       err = -EINVAL;
+                       goto exit_free_devices;
+               }
+       }
+
+       err = platform_driver_register(&cc770_isa_driver);
+       if (err)
+               goto exit_free_devices;
+
+       pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+       return 0;
+
+exit_free_devices:
+       /* --idx first: the failing slot was already put/never added */
+       while (--idx >= 0) {
+               if (cc770_isa_devs[idx])
+                       platform_device_unregister(cc770_isa_devs[idx]);
+       }
+
+       return err;
+}
+module_init(cc770_isa_init);
+
+/* Unregister the platform driver and every device created at init time. */
+static void __exit cc770_isa_exit(void)
+{
+       int idx;
+
+       platform_driver_unregister(&cc770_isa_driver);
+       for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+               if (cc770_isa_devs[idx])
+                       platform_device_unregister(cc770_isa_devs[idx]);
+       }
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644 (file)
index 0000000..53115ee
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data are used you should have similar definitions
+ * in your board-specific code:
+ *
+ *   static struct cc770_platform_data myboard_cc770_pdata = {
+ *           .osc_freq = 16000000,
+ *           .cir = 0x41,
+ *           .cor = 0x20,
+ *           .bcr = 0x40,
+ *   };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ *   can@3,100 {
+ *           compatible = "bosch,cc770";
+ *           reg = <3 0x100 0x80>;
+ *           interrupts = <2 0>;
+ *           interrupt-parent = <&mpic>;
+ *           bosch,external-clock-frequency = <16000000>;
+ *   };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK  16000000
+
+/* Read a controller register via the ioremap'ed register window. */
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return ioread8(priv->reg_base + reg);
+}
+
+/* Write a controller register via the ioremap'ed register window. */
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+                                    u8 val)
+{
+       iowrite8(val, priv->reg_base + reg);
+}
+
+/*
+ * Derive the chip configuration (CAN clock, CPU interface, bus
+ * configuration and clock-out registers) from device tree properties.
+ * Always returns 0; missing properties fall back to defaults.
+ */
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+                                           struct cc770_priv *priv)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const u32 *prop;
+       int prop_size;
+       u32 clkext;
+
+       prop = of_get_property(np, "bosch,external-clock-frequency",
+                              &prop_size);
+       if (prop && (prop_size ==  sizeof(u32)))
+               clkext = *prop;
+       else
+               clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+       priv->can.clock.freq = clkext;
+
+       /* The system clock may not exceed 10 MHz */
+       if (priv->can.clock.freq > 10000000) {
+               priv->cpu_interface |= CPUIF_DSC;
+               priv->can.clock.freq /= 2;
+       }
+
+       /* The memory clock may not exceed 8 MHz */
+       if (priv->can.clock.freq > 8000000)
+               priv->cpu_interface |= CPUIF_DMC;
+
+       /* Optional CPU interface tweaks */
+       if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+               priv->cpu_interface |= CPUIF_DMC;
+       if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+               priv->cpu_interface |= CPUIF_MUX;
+
+       /* Comparator bypass is enabled unless explicitly disabled */
+       if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+               priv->bus_config |= BUSCFG_CBY;
+       if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+               priv->bus_config |= BUSCFG_DR0;
+       if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+               priv->bus_config |= BUSCFG_DR1;
+       if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+               priv->bus_config |= BUSCFG_DT1;
+       if (of_get_property(np, "bosch,polarity-dominant", NULL))
+               priv->bus_config |= BUSCFG_POL;
+
+       prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+       if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+               u32 cdv = clkext / *prop;       /* clock-out divider */
+               int slew;
+
+               if (cdv > 0 && cdv < 16) {
+                       priv->cpu_interface |= CPUIF_CEN;
+                       priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+                       prop = of_get_property(np, "bosch,slew-rate",
+                                              &prop_size);
+                       if (prop && (prop_size == sizeof(u32))) {
+                               slew = *prop;
+                       } else {
+                               /* Determine default slew rate */
+                               slew = (CLKOUT_SL_MASK >>
+                                       CLKOUT_SL_SHIFT) -
+                                       ((cdv * clkext - 1) / 8000000);
+                               if (slew < 0)
+                                       slew = 0;
+                       }
+                       priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+                               CLKOUT_SL_MASK;
+               } else {
+                       dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Derive the chip configuration from board-provided platform data.
+ *
+ * Fix: the original tested "priv->cpu_interface | CPUIF_DSC" — a bitwise
+ * OR that is always true — and did so before cpu_interface had even been
+ * assigned from pdata->cir.  Assign cir first and test the DSC bit with
+ * '&' so the CAN clock is only halved when divide-system-clock is set.
+ */
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+                                            struct cc770_priv *priv)
+{
+       struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+       priv->can.clock.freq = pdata->osc_freq;
+       priv->cpu_interface = pdata->cir;
+       if (priv->cpu_interface & CPUIF_DSC)
+               priv->can.clock.freq /= 2;
+       priv->clkout = pdata->cor;
+       priv->bus_config = pdata->bcr;
+
+       return 0;
+}
+
+/*
+ * Probe a platform-bus CC770/AN82527: map the register window, allocate
+ * the CAN netdevice, pull the chip configuration from the device tree or
+ * platform data, and register the device.
+ *
+ * Fix: the failure message said "CC700" instead of "CC770".
+ */
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct cc770_priv *priv;
+       struct resource *mem;
+       resource_size_t mem_size;
+       void __iomem *base;
+       int err, irq;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_irq(pdev, 0);
+       if (!mem || irq <= 0)
+               return -ENODEV;
+
+       mem_size = resource_size(mem);
+       if (!request_mem_region(mem->start, mem_size, pdev->name))
+               return -EBUSY;
+
+       base = ioremap(mem->start, mem_size);
+       if (!base) {
+               err = -ENOMEM;
+               goto exit_release_mem;
+       }
+
+       dev = alloc_cc770dev(0);
+       if (!dev) {
+               err = -ENOMEM;
+               goto exit_unmap_mem;
+       }
+
+       dev->irq = irq;
+       priv = netdev_priv(dev);
+       priv->read_reg = cc770_platform_read_reg;
+       priv->write_reg = cc770_platform_write_reg;
+       priv->irq_flags = IRQF_SHARED;
+       priv->reg_base = base;
+
+       /* Chip configuration comes from the DT node or platform data */
+       if (pdev->dev.of_node)
+               err = cc770_get_of_node_data(pdev, priv);
+       else if (pdev->dev.platform_data)
+               err = cc770_get_platform_data(pdev, priv);
+       else
+               err = -ENODEV;
+       if (err)
+               goto exit_free_cc770;
+
+       dev_dbg(&pdev->dev,
+                "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+                "bus_config=0x%02x clkout=0x%02x\n",
+                priv->reg_base, dev->irq, priv->can.clock.freq,
+                priv->cpu_interface, priv->bus_config, priv->clkout);
+
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = register_cc770dev(dev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "couldn't register CC770 device (err=%d)\n", err);
+               goto exit_free_cc770;
+       }
+
+       return 0;
+
+exit_free_cc770:
+       free_cc770dev(dev);
+exit_unmap_mem:
+       iounmap(base);
+exit_release_mem:
+       release_mem_region(mem->start, mem_size);
+
+       return err;
+}
+
+/* Undo cc770_platform_probe(): unregister and release all resources. */
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct resource *mem;
+
+       unregister_cc770dev(dev);
+       iounmap(priv->reg_base);
+       free_cc770dev(dev);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+
+       return 0;
+}
+
+/*
+ * Device tree match table.
+ * NOTE(review): there is no MODULE_DEVICE_TABLE(of, ...) entry, so the
+ * module will not autoload on an OF match — confirm whether intended.
+ */
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+       {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+       {.compatible = "intc,82527"},  /* AN82527 from Intel CP */
+       {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = cc770_platform_table,
+       },
+       .probe = cc770_platform_probe,
+       .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
index 25695bde0549138a572f2eeb414f686af869051f..120f1ab5a2ce0d945ba76878afcc0b2352a11f18 100644 (file)
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
 
        /* New-style flags. */
        dev->flags = IFF_NOARP;
-       dev->features = NETIF_F_NO_CSUM;
+       dev->features = NETIF_F_HW_CSUM;
 }
 
 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
index e02337953f41b93b3c8f8e300e26adef196ef6f6..165a4c79802515123dd64375a4a0952a179b205b 100644 (file)
@@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = {
        .remove = __devexit_p(flexcan_remove),
 };
 
-static int __init flexcan_init(void)
-{
-       pr_info("%s netdevice driver\n", DRV_NAME);
-       return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
-       platform_driver_unregister(&flexcan_driver);
-       pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
 
 MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
              "Marc Kleine-Budde <kernel@pengutronix.de>");
index 32778d56d3306e57d08d735925305e262bd074da..08c893cb7896a9e97e5bc920f0f65613b33e24f6 100644 (file)
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
        .remove         = __devexit_p(ican3_remove),
 };
 
-static int __init ican3_init(void)
-{
-       return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
-       platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
index 5fedc3375562316c3e13f0a9a7ff4b94854c9c34..5caa572d71e3bbf2babd6c9e918950dd159765ce 100644 (file)
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
 #endif
 };
 
-static int __init mpc5xxx_can_init(void)
-{
-       return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
-       platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
 
 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
index ec4a3119e2c93d9b6ee0c157a6c13c939d612a16..1c82dd8b896e773ec38fc4177ddb6e77db559f9a 100644 (file)
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
 
        priv->open_time = jiffies;
 
-       clrbits8(&regs->canctl1, MSCAN_LISTEN);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               setbits8(&regs->canctl1, MSCAN_LISTEN);
+       else
+               clrbits8(&regs->canctl1, MSCAN_LISTEN);
 
        ret = mscan_start(dev);
        if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
        priv->can.bittiming_const = &mscan_bittiming_const;
        priv->can.do_set_bittiming = mscan_do_set_bittiming;
        priv->can.do_set_mode = mscan_do_set_mode;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LISTENONLY;
 
        for (i = 0; i < TX_QUEUE_SIZE; i++) {
                priv->tx_queue[i].id = i;
index fe9e64d476eb0174d703986cdf58a399bf71ff8b..36e9d594069dba65e91c6b51733280db257d7fb6 100644 (file)
@@ -6,7 +6,6 @@ if CAN_SJA1000
 
 config CAN_SJA1000_ISA
        tristate "ISA Bus based legacy SJA1000 driver"
-       depends on ISA
        ---help---
          This driver adds legacy support for SJA1000 chips connected to
          the ISA bus using I/O port, memory mapped or indirect access.
index 905bce0b3a4328bdc59243481d2624c63f55b0e8..2c7f5036f570fd70d1078c1fabef3b7336d2d89e 100644 (file)
@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
index 496223e9e2fc469d14dffee5bd584951239cabe1..90c5c2dfd2fd17a515232520daf4dadd1a33d53f 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
 static unsigned long mem[MAXDEV];
 static int __devinitdata irq[MAXDEV];
 static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
 
 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
 module_param_array(mem, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(mem, "I/O memory address");
 
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
 MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
 
 module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
 #define SJA1000_IOSIZE          0x20
 #define SJA1000_IOSIZE_INDIRECT 0x02
 
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
 static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
 {
        return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
        outb(val, base + 1);
 }
 
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
-       if (port[idx] || mem[idx]) {
-               if (irq[idx])
-                       return 1;
-       } else if (idx)
-               return 0;
-
-       dev_err(pdev, "insufficient parameters supplied\n");
-       return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct sja1000_priv *priv;
        void __iomem *base = NULL;
        int iosize = SJA1000_IOSIZE;
+       int idx = pdev->id;
        int err;
 
+       dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+               idx, port[idx], mem[idx], irq[idx]);
+
        if (mem[idx]) {
                if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
                        err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
        else
                priv->can.clock.freq = CLK_DEFAULT / 2;
 
-       if (ocr[idx] != -1)
-               priv->ocr = ocr[idx] & 0xff;
-       else if (ocr[0] != -1)
-               priv->ocr = ocr[0] & 0xff;
+       if (ocr[idx] != 0xff)
+               priv->ocr = ocr[idx];
+       else if (ocr[0] != 0xff)
+               priv->ocr = ocr[0];
        else
                priv->ocr = OCR_DEFAULT;
 
-       if (cdr[idx] != -1)
-               priv->cdr = cdr[idx] & 0xff;
-       else if (cdr[0] != -1)
-               priv->cdr = cdr[0] & 0xff;
+       if (cdr[idx] != 0xff)
+               priv->cdr = cdr[idx];
+       else if (cdr[0] != 0xff)
+               priv->cdr = cdr[0];
        else
                priv->cdr = CDR_DEFAULT;
 
-       dev_set_drvdata(pdev, dev);
-       SET_NETDEV_DEV(dev, pdev);
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_sja1000dev(dev);
        if (err) {
-               dev_err(pdev, "registering %s failed (err=%d)\n",
+               dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
                        DRV_NAME, err);
                goto exit_unmap;
        }
 
-       dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+       dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
                 DRV_NAME, priv->reg_base, dev->irq);
        return 0;
 
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
        return err;
 }
 
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
        struct sja1000_priv *priv = netdev_priv(dev);
+       int idx = pdev->id;
 
        unregister_sja1000dev(dev);
-       dev_set_drvdata(pdev, NULL);
+       dev_set_drvdata(&pdev->dev, NULL);
 
        if (mem[idx]) {
                iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
        return 0;
 }
 
-static struct isa_driver sja1000_isa_driver = {
-       .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
        .probe = sja1000_isa_probe,
        .remove = __devexit_p(sja1000_isa_remove),
        .driver = {
                .name = DRV_NAME,
+               .owner = THIS_MODULE,
        },
 };
 
 static int __init sja1000_isa_init(void)
 {
-       int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+       int idx, err;
+
+       for (idx = 0; idx < MAXDEV; idx++) {
+               if ((port[idx] || mem[idx]) && irq[idx]) {
+                       sja1000_isa_devs[idx] =
+                               platform_device_alloc(DRV_NAME, idx);
+                       if (!sja1000_isa_devs[idx]) {
+                               err = -ENOMEM;
+                               goto exit_free_devices;
+                       }
+                       err = platform_device_add(sja1000_isa_devs[idx]);
+                       if (err) {
+                               platform_device_put(sja1000_isa_devs[idx]);
+                               goto exit_free_devices;
+                       }
+                       pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+                                "irq=%d\n",
+                                DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+               } else if (idx == 0 || port[idx] || mem[idx]) {
+                               pr_err("%s: insufficient parameters supplied\n",
+                                      DRV_NAME);
+                               err = -EINVAL;
+                               goto exit_free_devices;
+               }
+       }
+
+       err = platform_driver_register(&sja1000_isa_driver);
+       if (err)
+               goto exit_free_devices;
+
+       pr_info("Legacy %s driver for max. %d devices registered\n",
+               DRV_NAME, MAXDEV);
+
+       return 0;
+
+exit_free_devices:
+       while (--idx >= 0) {
+               if (sja1000_isa_devs[idx])
+                       platform_device_unregister(sja1000_isa_devs[idx]);
+       }
 
-       if (!err)
-               printk(KERN_INFO
-                      "Legacy %s driver for max. %d devices registered\n",
-                      DRV_NAME, MAXDEV);
        return err;
 }
 
 static void __exit sja1000_isa_exit(void)
 {
-       isa_unregister_driver(&sja1000_isa_driver);
+       int idx;
+
+       platform_driver_unregister(&sja1000_isa_driver);
+       for (idx = 0; idx < MAXDEV; idx++) {
+               if (sja1000_isa_devs[idx])
+                       platform_device_unregister(sja1000_isa_devs[idx]);
+       }
 }
 
 module_init(sja1000_isa_init);
index c3dd9d09be572b0ff165edc3a28102935839b418..f2683eb6a3d588a7dff7af9f8830a79dc18206c7 100644 (file)
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
        .remove = __devexit_p(sja1000_ofp_remove),
 };
 
-static int __init sja1000_ofp_init(void)
-{
-       return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
-       return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
index d9fadc489b32dc75f09c0d55d63b9e85b90f143c..4f50145f64839f519c232d0e4ec79e7fedf2bd34 100644 (file)
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
        },
 };
 
-static int __init sp_init(void)
-{
-       return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
-       platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
index a979b006f4591fe68773dbe5db84dcc160439792..3f1ebcc2cb831af2f09add75d793fd9b1d67f57b 100644 (file)
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
 
        /* New-style flags. */
        dev->flags              = IFF_NOARP;
-       dev->features           = NETIF_F_NO_CSUM;
+       dev->features           = NETIF_F_HW_CSUM;
 }
 
 /******************************************
index 09a8b86cf1ac314819a5e386bf3500bea76ec9e5..a7c77c744ee9150d506fcfcb891bc7ac719606e6 100644 (file)
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
        .remove = __devexit_p(softing_pdev_remove),
 };
 
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
-       return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
-       platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
 
+MODULE_ALIAS("platform:softing");
 MODULE_DESCRIPTION("Softing DPRAM CAN driver");
 MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
 MODULE_LICENSE("GPL v2");
index 2adc294f512a8c0add1ba99c49660b0a48a5bd7d..df809e3f130eed15f952323d0291ba6daa1661b8 100644 (file)
@@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = {
        .resume = ti_hecc_resume,
 };
 
-static int __init ti_hecc_init_driver(void)
-{
-       printk(KERN_INFO DRV_DESC "\n");
-       return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
-       printk(KERN_INFO DRV_DESC " unloaded\n");
-       platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
 
 MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
 MODULE_LICENSE("GPL v2");
index f93e2d6fc88c5e05dfc8b67cc40c39356ed69eb4..ea2d94285936bcc1e0df8b42beb297d7979ec1bf 100644 (file)
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
  * See Documentation/networking/can.txt for details.
  */
 
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
 module_param(echo, bool, S_IRUGO);
 MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
 
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644 (file)
index 0000000..dd151d5
--- /dev/null
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+       depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+       tristate
+       default n
+
+config NET_DSA_MV88E6060
+       tristate "Marvell 88E6060 ethernet switch chip support"
+       select NET_DSA_TAG_TRAILER
+       ---help---
+         This enables support for the Marvell 88E6060 ethernet switch
+         chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+       bool
+       default n
+
+config NET_DSA_MV88E6131
+       tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+       select NET_DSA_MV88E6XXX
+       select NET_DSA_MV88E6XXX_NEED_PPU
+       select NET_DSA_TAG_DSA
+       ---help---
+         This enables support for the Marvell 88E6085/6095/6095F/6131
+         ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+       tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+       select NET_DSA_MV88E6XXX
+       select NET_DSA_TAG_EDSA
+       ---help---
+         This enables support for the Marvell 88E6123/6161/6165
+         ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644 (file)
index 0000000..f3bda05
--- /dev/null
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
similarity index 96%
rename from net/dsa/mv88e6060.c
rename to drivers/net/dsa/mv88e6060.c
index 8f4ff5a2c8133b9961b78b969aeed05318fc1d4d..7fc4e81d4d4353f6a85658d5be49ee6350264b8b 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 
 #define REG_PORT(p)            (8 + (p))
 #define REG_GLOBAL             0x0f
@@ -286,3 +286,8 @@ static void __exit mv88e6060_cleanup(void)
        unregister_switch_driver(&mv88e6060_switch_driver);
 }
 module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
similarity index 96%
rename from net/dsa/mv88e6123_61_65.c
rename to drivers/net/dsa/mv88e6123_61_65.c
index 52faaa21a4d927e1a943840b01e2c85bb20464b9..c0a458fc698fad1306641c03ba50c1567d964598 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
@@ -419,7 +419,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
        return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .tag_protocol           = cpu_to_be16(ETH_P_EDSA),
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6123_61_65_probe,
@@ -433,15 +433,6 @@ static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .get_sset_count         = mv88e6123_61_65_get_sset_count,
 };
 
-static int __init mv88e6123_61_65_init(void)
-{
-       register_switch_driver(&mv88e6123_61_65_switch_driver);
-       return 0;
-}
-module_init(mv88e6123_61_65_init);
-
-static void __exit mv88e6123_61_65_cleanup(void)
-{
-       unregister_switch_driver(&mv88e6123_61_65_switch_driver);
-}
-module_exit(mv88e6123_61_65_cleanup);
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
similarity index 96%
rename from net/dsa/mv88e6131.c
rename to drivers/net/dsa/mv88e6131.c
index 9bd1061fa4ee5f6ef0b88b838c458d47603d541a..e0eb682438343b774130769b24c42472c25e1af4 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -415,7 +415,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds)
        return ARRAY_SIZE(mv88e6131_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6131_switch_driver = {
+struct dsa_switch_driver mv88e6131_switch_driver = {
        .tag_protocol           = cpu_to_be16(ETH_P_DSA),
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6131_probe,
@@ -429,15 +429,7 @@ static struct dsa_switch_driver mv88e6131_switch_driver = {
        .get_sset_count         = mv88e6131_get_sset_count,
 };
 
-static int __init mv88e6131_init(void)
-{
-       register_switch_driver(&mv88e6131_switch_driver);
-       return 0;
-}
-module_init(mv88e6131_init);
-
-static void __exit mv88e6131_cleanup(void)
-{
-       unregister_switch_driver(&mv88e6131_switch_driver);
-}
-module_exit(mv88e6131_cleanup);
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
similarity index 93%
rename from net/dsa/mv88e6xxx.c
rename to drivers/net/dsa/mv88e6xxx.c
index efe661a9def4db3644d0e8402e2b00295a3cd0c6..5467c040824a6d9d96c947f8a39c8e10e7afc753 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -520,3 +520,30 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 
        mutex_unlock(&ps->stats_mutex);
 }
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+       register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+       register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+       return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+       unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+       unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
similarity index 95%
rename from net/dsa/mv88e6xxx.h
rename to drivers/net/dsa/mv88e6xxx.h
index 61156ca26a0da3d17e7be6bc70bc7c11de4c6489..fc2cd7b90e8d257f9144933bb93f29c77b51ad83 100644 (file)
@@ -71,6 +71,9 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                                 int nr_stats, struct mv88e6xxx_hw_stat *stats,
                                 int port, uint64_t *data);
 
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
 #define REG_READ(addr, reg)                                            \
        ({                                                              \
                int __ret;                                              \
index a7c5e8831e8c0f3a5a01e5adc53b29b2bc85129d..087648ea1edb0ff015ce8abce33ef7ea7a71e0d2 100644 (file)
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
-       dev->features   |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+       dev->features   |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
        random_ether_addr(dev->dev_addr);
 }
 
index 972f80ecc510a7346c5ca364535faed2a8e1bbea..da410f036869cd4cb4f9368acfc4441c027f2278 100644 (file)
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
index b42c06baba8983bbf1b78c1115d7a72f1a262864..8153a3e0a1a4457a5c156508d7c605362bc76e37 100644 (file)
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
 {
        struct vortex_private *vp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        if (VORTEX_PCI(vp)) {
-               strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+               strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+                       sizeof(info->bus_info));
        } else {
                if (VORTEX_EISA(vp))
-                       strcpy(info->bus_info, dev_name(vp->gendev));
+                       strlcpy(info->bus_info, dev_name(vp->gendev),
+                               sizeof(info->bus_info));
                else
-                       sprintf(info->bus_info, "EISA 0x%lx %d",
-                                       dev->base_addr, dev->irq);
+                       snprintf(info->bus_info, sizeof(info->bus_info),
+                               "EISA 0x%lx %d", dev->base_addr, dev->irq);
        }
 }
 
index 20ea07508ac747304dd1448da1e7deb4d9a67041..6d6bc754b1a8e9f3fd49255aaadfe5620c49d567 100644 (file)
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 
        smp_rmb();
        if(tp->card_state == Sleeping) {
-               strcpy(info->fw_version, "Sleep image");
+               strlcpy(info->fw_version, "Sleep image",
+                       sizeof(info->fw_version));
        } else {
                INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
                if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
-                       strcpy(info->fw_version, "Unknown runtime");
+                       strlcpy(info->fw_version, "Unknown runtime",
+                               sizeof(info->fw_version));
                } else {
                        u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
-                       snprintf(info->fw_version, 32, "%02x.%03x.%03x",
-                                sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
-                                sleep_ver & 0xfff);
+                       snprintf(info->fw_version, sizeof(info->fw_version),
+                               "%02x.%03x.%03x", sleep_ver >> 24,
+                               (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
                }
        }
 
-       strcpy(info->driver, KBUILD_MODNAME);
-       strcpy(info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int
index 58a12e4c78f953da62b043550dcc89093ff1cbc5..ef325ffa1b5ac63dd61dd0c0230c8a63a1a4a6cf 100644 (file)
@@ -14,8 +14,6 @@
 
 #define TX_PAGES 12    /* Two Tx slots */
 
-#define ETHER_ADDR_LEN 6
-
 /* The 8390 specific per-packet-header format. */
 struct e8390_pkt_hdr {
   unsigned char status; /* status */
index 547737340cbba774c158e613857a58d27d282d7c..3ad5d2f9a49cc42622038ff1986ac8505952f66a 100644 (file)
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
     i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
     if (i) return i;
 
-    for(i = 0; i < ETHER_ADDR_LEN; i++)
+    for (i = 0; i < ETH_ALEN; i++)
        dev->dev_addr[i] = SA_prom[i];
 
     printk(" %pM\n", dev->dev_addr);
index e9f8432f55b461f388be8643386b948a5b08ca45..9e8ba4f5636bd71bcf2c9946450b76c587dbddb8 100644 (file)
@@ -735,15 +735,14 @@ static int ax_init_dev(struct net_device *dev)
        if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
                ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
                        ei_local->mem + E8390_CMD); /* 0x61 */
-               for (i = 0; i < ETHER_ADDR_LEN; i++)
+               for (i = 0; i < ETH_ALEN; i++)
                        dev->dev_addr[i] =
                                ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
        }
 
        if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
            ax->plat->mac_addr)
-               memcpy(dev->dev_addr, ax->plat->mac_addr,
-                      ETHER_ADDR_LEN);
+               memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
 
        ax_reset_8390(dev);
 
@@ -991,18 +990,7 @@ static struct platform_driver axdrv = {
        .resume         = ax_resume,
 };
 
-static int __init axdrv_init(void)
-{
-       return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
-       platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
 
 MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
index 7a09575ecff05c3f8f48afb68eb5da4ddbbc2326..6428f9e7a554d7b994da4a9f982b44cc01359421 100644 (file)
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
                goto out;
        }
 
-       for (i = 0; i < ETHER_ADDR_LEN ; i++)
+       for (i = 0; i < ETH_ALEN ; i++)
                dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
 
 /*     Check the Racal vendor ID as well. */
index eeac843dcd2df9f6298ca12e6e60aecfdff18975..d42938b6b596e32b6a00b6c562fa129cc3600d60 100644 (file)
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
        /* Retrieve and checksum the station address. */
        outw(MAC_Page, ioaddr + HP_PAGING);
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++) {
+       for(i = 0; i < ETH_ALEN; i++) {
                unsigned char inval = inb(ioaddr + 8 + i);
                dev->dev_addr[i] = inval;
                checksum += inval;
index 18564d4a7c04d0c9fc1f6050f78bd67c2532f90f..113f1e075a2644bdab095bf087d02799f3c82238 100644 (file)
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
 
        printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for(i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + i);
 
        printk(" %pM", dev->dev_addr);
index 3dac937a67c4d0c184fb434b26e973d432f34518..5370c884620b1503a6526bb305ee6c7f495b031f 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
     if (!dev)
        return -ENOMEM;
 
-    for(j = 0; j < ETHER_ADDR_LEN; j++)
+    for (j = 0; j < ETH_ALEN; j++)
        dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
 
     /* We must set the 8390 for word mode. */
index f9888d20177ba9d03af278a1b8eefc8bc3ced33f..69490ae018ea5e8a9cfbb25d06287a6280492319 100644 (file)
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
                || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
                || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
                printk("lne390.c: card not found");
-               for(i = 0; i < ETHER_ADDR_LEN; i++)
+               for (i = 0; i < ETH_ALEN; i++)
                        printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
                printk(" (invalid prefix).\n");
                return -ENODEV;
        }
 #endif
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
        printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
               0xa+revision, ioaddr/0x1000, dev->dev_addr);
index cd36a6a5f4081390fc6a5af810e864f93a55ec67..9b9c77d5a65cca2858d4a35c6210500363c54dfd 100644 (file)
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
 
        dev->base_addr = ioaddr;
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
        printk(" %pM\n", dev->dev_addr);
 
index 1063093b3afc5cfd9cd1545b73cbbdd64db0ec12..f92ea2a65a576d20f5732975794421cfc142ad28 100644 (file)
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 #ifdef CONFIG_PLAT_MAPPI
        outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
                ioaddr + E8390_CMD); /* 0x61 */
-       for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+       for (i = 0; i < ETH_ALEN; i++) {
                dev->dev_addr[i] = SA_prom[i]
                        = inb_p(ioaddr + EN1_PHYS_SHIFT(i));
        }
 #else
-       for(i = 0; i < ETHER_ADDR_LEN; i++) {
+       for (i = 0; i < ETH_ALEN; i++) {
                dev->dev_addr[i] = SA_prom[i];
        }
 #endif
index 70cdc6996342f9b6f5ab821593da43521ca6eb44..922b32036c6380d296334a9503e8e334f9f4cc0f 100644 (file)
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
 
        dev->base_addr = base_addr;
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
 
        printk(" %pM\n", dev->dev_addr);
index 39923425ba2547f56fc4d02306ebac2848030635..3fab04a0034a3aa68ecc45544cf3cdf9e682af31 100644 (file)
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
        struct ei_device *ei = netdev_priv(dev);
        struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops ne2k_pci_ethtool_ops = {
index 243ed2aee88e35e801a2c6582f8a80d4e287ba7b..2a3e8057feaef80341460ecd1854c55d3dfdd61d 100644 (file)
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
 #endif
 
        port_index = inb(ioaddr + NE3210_CFG2) >> 6;
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
        printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
                edev->slot, ifmap[port_index], dev->dev_addr);
index d85f0a84bc7bc0c2a993d72a591114a2f43bb29f..3b903759980a01c318ead3411249adec47382e57 100644 (file)
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
 #ifdef CONFIG_SH_STANDARD_BIOS
   sh_bios_get_node_addr (stnic_eadr);
 #endif
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     dev->dev_addr[i] = stnic_eadr[i];
 
   /* Set the base address to point to the NIC, not the "real" base! */
index 3aa9fe9999b5c5c6a34639fa8a1768234d75af1c..bcd27323b2030dccb635def78f29dcebc0df4192 100644 (file)
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
        if (i)
                return i;
 
-       for (i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
 
        pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
index 597f4d45c632944840c0c5949b1f76d8600131e9..3474a61d470501adb915b6dc2e0cef487672a971 100644 (file)
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
index be5dde040261b748e75535e4521cc5974f946b75..cd6d69a6a7d245afbba4daeba2c9bdb29531dbcc 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ATMEL) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
index 6d9f6911000ffad9d2b8a299c6ae5fd82ebfcfe6..cb4f38a17f2006aa5fa4971c7a260d6d5957fff1 100644 (file)
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
 
 
 #ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 #endif /* VLAN_SUPPORT */
 
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
index 442fefa4f2ca373caa9759e128493fe0e3824efb..c885aa905decb01c1399ea50639cc2d734d3b419 100644 (file)
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
        .remove = __devexit_p(greth_of_remove),
 };
 
-static int __init greth_init(void)
-{
-       return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
-       platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
 
 MODULE_AUTHOR("Aeroflex Gaisler AB.");
 MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
index a9745f4ddbfe11ad06573f2aa1043f2e6f249a53..33e0a8c20f6b42db39ae423fbed5e30e9c100656 100644 (file)
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
        writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
 
        /* Setting the MAC address to the device */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb( dev->dev_addr[i], mmio + PADR + i );
 
        /* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        struct pci_dev *pci_dev = lp->pci_dev;
-       strcpy (info->driver, MODULE_NAME);
-       strcpy (info->version, MODULE_VERS);
-       sprintf(info->fw_version,"%u",chip_version);
-       strcpy (info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+       snprintf(info->fw_version, sizeof(info->fw_version),
+               "%u", chip_version);
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        spin_lock_irq(&lp->lock);
        /* Setting the MAC address to the device */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb( dev->dev_addr[i], lp->mmio + PADR + i );
 
        spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
        }
 
        /* Initializing MAC address */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = readb(lp->mmio + PADR + i);
 
        /* Setting user defined parametrs */
index 2ff2e7a12dd0217764f5c4867151903115720c37..8baa3527ba746f289ed0aead68fd350be4bb09b0 100644 (file)
@@ -586,7 +586,6 @@ typedef enum {
 
 #define PKT_BUFF_SZ                    1536
 #define MIN_PKT_LEN                    60
-#define ETH_ADDR_LEN                   6
 
 #define  AMD8111E_TX_TIMEOUT           (3 * HZ)/* 3 sec */
 #define SOFT_TIMER_FREQ                0xBEBC  /* 0.5 sec */
@@ -808,8 +807,8 @@ typedef enum {
 
 static int card_idx;
 static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
 static unsigned int chip_version;
 
 #endif /* _AMD8111E_H */
index 4865ff14bebf230a9c5a460d2817bd029029493d..cc9262be69c86c68020f7f4568662f0068bb2f4c 100644 (file)
@@ -1339,18 +1339,7 @@ static struct platform_driver au1000_eth_driver = {
                .owner  = THIS_MODULE,
        },
 };
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
-       return platform_driver_register(&au1000_eth_driver);
-}
 
-static void __exit au1000_exit_module(void)
-{
-       platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
 
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
index 3accd5d21b08ff1bb5a423485d1bf775273501ce..6be0dd67631a448cdeafb39ce23185ae150c22ed 100644 (file)
@@ -160,8 +160,6 @@ Include Files
 Defines
 ---------------------------------------------------------------------------- */
 
-#define ETHER_ADDR_LEN                 ETH_ALEN
-                                       /* 6 bytes in an Ethernet Address */
 #define MACE_LADRF_LEN                 8
                                        /* 8 bytes in Logical Address Filter */
 
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
        }
   }
   /* Set PADR register */
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
 
   /* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
 
   /* Read the ethernet address from the CIS. */
   len = pcmcia_get_tuple(link, 0x80, &buf);
-  if (!buf || len < ETHER_ADDR_LEN) {
+  if (!buf || len < ETH_ALEN) {
          kfree(buf);
          goto failed;
   }
-  memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+  memcpy(dev->dev_addr, buf, ETH_ALEN);
   kfree(buf);
 
   /* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
 static void set_multicast_list(struct net_device *dev)
 {
   mace_private *lp = netdev_priv(dev);
-  int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+  int adr[ETH_ALEN] = {0}; /* Ethernet address */
   struct netdev_hw_addr *ha;
 
 #ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
     /* Calculate multicast logical address filter */
     memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
     netdev_for_each_mc_addr(ha, dev) {
-      memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+      memcpy(adr, ha->addr, ETH_ALEN);
       BuildLAF(lp->multicast_ladrf, adr);
     }
   }
index f92bc6e348283f2bb026ffcce0f37ddd07eb4359..20e6dab0186c78da49d43e14b5f66370936c7cf3 100644 (file)
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
 {
        struct pcnet32_private *lp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (lp->pci_dev)
-               strcpy(info->bus_info, pci_name(lp->pci_dev));
+               strlcpy(info->bus_info, pci_name(lp->pci_dev),
+                       sizeof(info->bus_info));
        else
-               sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+               snprintf(info->bus_info, sizeof(info->bus_info),
+                       "VLB 0x%lx", dev->base_addr);
 }
 
 static u32 pcnet32_get_link(struct net_device *dev)
index 8fda457f94cf809d82793a659359e78ab0876b7a..7ea16d32a5f5cd49aafb3fbc6e71368c23147674 100644 (file)
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
        .remove         = __devexit_p(sunlance_sbus_remove),
 };
 
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
-       return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
-       platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
index 7be884d0aaf6e36464a5908aefdd29c12a2dadab..0a9326aa58b5b4a8e3f4730eedbc1f395651b4ba 100644 (file)
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver,  atl1c_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, atl1c_driver_version,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
index 02c7ed8d9eca01414ef78aaeb84011ee60f38069..b8591246eb4c35b9a12b53254ba4853381238ee7 100644 (file)
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
        }
 }
 
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
        }
 }
 
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl1c_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
                roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
 }
 
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl1c_vlan_mode(netdev, features);
index 6269438d365f9d413650611e8e06cccf8d3e9a5c..6e61f9f9ebb538b4fc6d0ef885b6c427fc48e3e7 100644 (file)
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
 {
        struct atl1e_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  atl1e_driver_name, 32);
-       strncpy(drvinfo->version, atl1e_driver_version, 32);
-       strncpy(drvinfo->fw_version, "L1e", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  atl1e_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, atl1e_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = atl1e_get_regs_len(netdev);
index 95483bcac1d029c38764385507d8a6824b4f504f..c915c0873810ddb7c561aa416b34c416c5fb8b42 100644 (file)
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
        }
 }
 
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
        }
 }
 
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
        return 0;
 }
 
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl1e_vlan_mode(netdev, features);
index 33a4e35f5ee822a5b1377084dea4c6cc7ae5ba84..9bd2049766480331e6a0f3355ac84174a822e9d5 100644 (file)
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->eedump_len = ATL1_EEDUMP_LEN;
index 1feae5928a4b0060707c1f8b6111caed46ea66f5..071f4c858969dad5ff166d69f62073f4c0518b09 100644 (file)
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
     synchronize_irq(adapter->pdev->irq);
 }
 
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
        }
 }
 
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl2_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
        atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
 {
        struct atl2_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  atl2_driver_name, 32);
-       strncpy(drvinfo->version, atl2_driver_version, 32);
-       strncpy(drvinfo->fw_version, "L2", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  atl2_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, atl2_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = atl2_get_regs_len(netdev);
index aabcf4b5745a31df2de00a1f6dcdd9a015b33b76..8ff7411094d58d545b5845874adc164b54e7c07f 100644 (file)
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
        spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
        }
 }
 
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atlx_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
        atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atlx_vlan_mode(netdev, features);
index 4cf835dbc1222f6c90154b601bc02b76d6efbbb6..3fb66d09ece59e463c81eb20b77ba7cfa7df9fd0 100644 (file)
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_irq(skb);
        }
 
        bp->tx_cons = cons;
index 965c7235804d61aa4c493b472239a29bde66bcb7..021fb818007a7932fa9eb918d188f9507aff517d 100644 (file)
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.1.11"
-#define DRV_MODULE_RELDATE     "July 20, 2011"
-#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION     "2.2.1"
+#define DRV_MODULE_RELDATE     "Dec 18, 2011"
+#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1b.fw"
 #define FW_RV2P_FILE_09_Ax     "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
 #define FW_RV2P_FILE_09                "bnx2/bnx2-rv2p-09-6.0.17.fw"
 
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
 
        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
-               u32 new_adv_reg = 0;
-               u32 new_adv1000_reg = 0;
+               u32 new_adv = 0;
+               u32 new_adv1000 = 0;
 
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;
 
-               if (bp->advertising & ADVERTISED_10baseT_Half)
-                       new_adv_reg |= ADVERTISE_10HALF;
-               if (bp->advertising & ADVERTISED_10baseT_Full)
-                       new_adv_reg |= ADVERTISE_10FULL;
-               if (bp->advertising & ADVERTISED_100baseT_Half)
-                       new_adv_reg |= ADVERTISE_100HALF;
-               if (bp->advertising & ADVERTISED_100baseT_Full)
-                       new_adv_reg |= ADVERTISE_100FULL;
-               if (bp->advertising & ADVERTISED_1000baseT_Full)
-                       new_adv1000_reg |= ADVERTISE_1000FULL;
+               new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+               new_adv |= ADVERTISE_CSMA;
+               new_adv |= bnx2_phy_get_pause_adv(bp);
 
-               new_adv_reg |= ADVERTISE_CSMA;
+               new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
 
-               new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
-               if ((adv1000_reg != new_adv1000_reg) ||
-                       (adv_reg != new_adv_reg) ||
+               if ((adv1000_reg != new_adv1000) ||
+                       (adv_reg != new_adv) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {
 
-                       bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
-                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+                       bnx2_write_phy(bp, bp->mii_adv, new_adv);
+                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
-       struct sk_buff *skb;
+       u8 *data;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
-       unsigned long align;
 
-       skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
-       if (skb == NULL) {
+       data = kmalloc(bp->rx_buf_size, gfp);
+       if (!data)
                return -ENOMEM;
-       }
 
-       if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
-               skb_reserve(skb, BNX2_RX_ALIGN - align);
-
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+       mapping = dma_map_single(&bp->pdev->dev,
+                                get_l2_fhdr(data),
+                                bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-               dev_kfree_skb(skb);
+               kfree(data);
                return -EIO;
        }
 
-       rx_buf->skb = skb;
-       rx_buf->desc = (struct l2_fhdr *) skb->data;
+       rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
+       unsigned int tx_bytes = 0;
        struct netdev_queue *txq;
 
        index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                sw_cons = NEXT_TX_BD(sw_cons);
 
+               tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }
 
+       netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
 
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
-                 struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+                  u8 *data, u16 cons, u16 prod)
 {
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
-       prod_rx_buf->skb = skb;
-       prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+       prod_rx_buf->data = data;
 
        if (cons == prod)
                return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
 {
        int err;
        u16 prod = ring_idx & 0xffff;
+       struct sk_buff *skb;
 
-       err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+       err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
-               bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+               bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
-               return err;
+               return NULL;
        }
 
-       skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
-
+       skb = build_skb(data);
+       if (!skb) {
+               kfree(data);
+               goto error;
+       }
+       skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
        if (hdr_len == 0) {
                skb_put(skb, len);
-               return 0;
+               return skb;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                        skb_frag_size_sub(frag, tail);
                                        skb->data_len -= tail;
                                }
-                               return 0;
+                               return skb;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];
 
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
-                               return err;
+                               return NULL;
                        }
 
                        dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
-       return 0;
+       return skb;
 }
 
 static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
+               u8 *data;
 
                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);
 
                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
-               skb = rx_buf->skb;
-               prefetchw(skb);
-
-               next_rx_buf =
-                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-               prefetch(next_rx_buf->desc);
+               data = rx_buf->data;
+               rx_buf->data = NULL;
 
-               rx_buf->skb = NULL;
+               rx_hdr = get_l2_fhdr(data);
+               prefetch(rx_hdr);
 
                dma_addr = dma_unmap_addr(rx_buf, mapping);
 
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);
 
-               rx_hdr = rx_buf->desc;
+               next_rx_buf =
+                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+               prefetch(get_l2_fhdr(next_rx_buf->data));
+
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;
 
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                len -= 4;
 
                if (len <= bp->rx_copy_thresh) {
-                       struct sk_buff *new_skb;
-
-                       new_skb = netdev_alloc_skb(bp->dev, len + 6);
-                       if (new_skb == NULL) {
-                               bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       skb = netdev_alloc_skb(bp->dev, len + 6);
+                       if (skb == NULL) {
+                               bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }
 
                        /* aligned copy */
-                       skb_copy_from_linear_data_offset(skb,
-                                                        BNX2_RX_OFFSET - 6,
-                                     new_skb->data, len + 6);
-                       skb_reserve(new_skb, 6);
-                       skb_put(new_skb, len);
+                       memcpy(skb->data,
+                              (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+                              len + 6);
+                       skb_reserve(skb, 6);
+                       skb_put(skb, len);
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb,
+                       bnx2_reuse_rx_data(bp, rxr, data,
                                sw_ring_cons, sw_ring_prod);
 
-                       skb = new_skb;
-               } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
-                          dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
-                       goto next_rx;
-
+               } else {
+                       skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+                                         (sw_ring_cons << 16) | sw_ring_prod);
+                       if (!skb)
+                               goto next_rx;
+               }
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+               if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
 
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
-               sizeof(struct skb_shared_info);
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        }
 
        bp->rx_buf_use_size = rx_size;
-       /* hw alignment */
-       bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+       /* hw alignment + build_skb() overhead*/
+       bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+               NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        }
                        dev_kfree_skb(skb);
                }
+               netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
        }
 }
 
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_ring_idx; j++) {
                        struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
-                       struct sk_buff *skb = rx_buf->skb;
+                       u8 *data = rx_buf->data;
 
-                       if (skb == NULL)
+                       if (data == NULL)
                                continue;
 
                        dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
 
-                       rx_buf->skb = NULL;
+                       rx_buf->data = NULL;
 
-                       dev_kfree_skb(skb);
+                       kfree(data);
                }
                for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
                        bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 {
        unsigned int pkt_size, num_pkts, i;
-       struct sk_buff *skb, *rx_skb;
+       struct sk_buff *skb;
+       u8 *data;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
-       rx_skb = rx_buf->skb;
+       data = rx_buf->data;
 
-       rx_hdr = rx_buf->desc;
-       skb_reserve(rx_skb, BNX2_RX_OFFSET);
+       rx_hdr = get_l2_fhdr(data);
+       data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
 
        dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
-               bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+               bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        for (i = 14; i < pkt_size; i++) {
-               if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+               if (*(data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;
 
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       strcpy(info->bus_info, pci_name(bp->pdev));
-       strcpy(info->fw_version, bp->fw_version);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+       strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
 }
 
 #define BNX2_REGDUMP_LEN               (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
        return 0;
 }
 
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
 }
 
 static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
index 99d31a7d6aaab6d04a64c529c55700a9d74562e9..1db2d51ba3f16cd017a7d01a9bd95f37c8011c95 100644 (file)
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
 #define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
 #define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
 
+/*
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
+ */
 struct sw_bd {
-       struct sk_buff          *skb;
-       struct l2_fhdr          *desc;
+       u8                      *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
+/* Its faster to compute this from data than storing it in sw_bd
+ * (less cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+       return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
 struct sw_pg {
        struct page             *page;
        DEFINE_DMA_UNMAP_ADDR(mapping);
index aec7212ac9835a4aba89b24641238a0e2d08448e..8c73d34b2ff1d59b4b9f9766244d96b16ca00dd6 100644 (file)
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.70.30-0"
-#define DRV_MODULE_RELDATE      "2011/10/25"
+#define DRV_MODULE_VERSION      "1.70.35-0"
+#define DRV_MODULE_RELDATE      "2011/11/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
 #define FCOE_TXQ_IDX(bp)       (MAX_ETH_TXQ_IDX(bp))
 
 /* fast path */
+/*
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
+ */
 struct sw_rx_bd {
-       struct sk_buff  *skb;
+       u8              *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -411,8 +416,7 @@ union db_prod {
 
 
 /* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN                        ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-                                        BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN                        (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK           (RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)         (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -425,8 +429,8 @@ union host_hc_status_block {
 
 struct bnx2x_agg_info {
        /*
-        * First aggregation buffer is an skb, the following - are pages.
-        * We will preallocate the skbs for each aggregation when
+        * First aggregation buffer is a data buffer, the following - are pages.
+        * We will preallocate the data buffer for each aggregation when
         * we open the interface and will replace the BD at the consumer
         * with this one when we receive the TPA_START CQE in order to
         * keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
        u16                     parsing_flags;
        u16                     vlan_tag;
        u16                     len_on_bd;
+       u32                     rxhash;
 };
 
 #define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
        __le16                  fp_hc_idx;
 
        u8                      index;          /* number in fp array */
+       u8                      rx_queue;       /* index for skb_record */
        u8                      cl_id;          /* eth client id */
        u8                      cl_qzone_id;
        u8                      fw_sb_id;       /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
 #define CHIP_PORT_MODE_NONE                    0x2
 #define CHIP_MODE(bp)                  (bp->common.chip_port_mode)
 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+       u32                     boot_mode;
 };
 
 /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
 
        u32                             wb_comp;
        u32                             wb_data[4];
+
+       union drv_info_to_mcp           drv_info_to_mcp;
 };
 
 #define bnx2x_sp(bp, var)              (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
 enum {
        BNX2X_PORT_QUERY_IDX,
        BNX2X_PF_QUERY_IDX,
+       BNX2X_FCOE_QUERY_IDX,
        BNX2X_FIRST_QUEUE_QUERY_IDX,
 };
 
 struct bnx2x_fw_stats_req {
        struct stats_query_header hdr;
-       struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+       struct stats_query_entry query[FP_SB_MAX_E1x+
+               BNX2X_FIRST_QUEUE_QUERY_IDX];
 };
 
 struct bnx2x_fw_stats_data {
        struct stats_counter    storm_counters;
        struct per_port_stats   port;
        struct per_pf_stats     pf;
+       struct fcoe_statistics_params   fcoe;
        struct per_queue_stats  queue_stats[1];
 };
 
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
 enum {
        BNX2X_SP_RTNL_SETUP_TC,
        BNX2X_SP_RTNL_TX_TIMEOUT,
+       BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
 
@@ -1186,10 +1200,20 @@ struct bnx2x {
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 
        /* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT           ((L1_CACHE_SHIFT < 8) ? \
-                                        L1_CACHE_SHIFT : 8)
-       /* FW use 2 Cache lines Alignment for start packet and size  */
-#define BNX2X_FW_RX_ALIGN              (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT           min(8, L1_CACHE_SHIFT)
+
+       /* FW uses 2 Cache lines Alignment for start packet and size
+        *
+        * We assume skb_build() uses sizeof(struct skb_shared_info) bytes
+        * at the end of skb->data, to avoid wasting a full cache line.
+        * This reduces memory use (skb->truesize).
+        */
+#define BNX2X_FW_RX_ALIGN_START        (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END                                  \
+       max(1UL << BNX2X_RX_ALIGN_SHIFT,                        \
+           SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define BNX2X_PXP_DRAM_ALIGN           (BNX2X_RX_ALIGN_SHIFT - 5)
 
        struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@ struct bnx2x {
 #define NO_ISCSI_OOO_FLAG              (1 << 13)
 #define NO_ISCSI_FLAG                  (1 << 14)
 #define NO_FCOE_FLAG                   (1 << 15)
+#define BC_SUPPORTS_PFC_STATS          (1 << 17)
 
 #define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
                              AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 
-#define RSS_FLAGS(bp) \
-               (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
-                (bp->multi_mode << \
-                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK                     0x7f
 
 
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN                  128
 #define VENDOR_ID_LEN                  4
 
+int bnx2x_close(struct net_device *dev);
+
 /* Congestion management fairness mode */
 #define CMNG_FNS_NONE          0
 #define CMNG_FNS_MINMAX                1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev);
 void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+       ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+       (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
 #endif /* bnx2x.h */
index 580b44edb066eafe779b8d55953a5d70cf8546f8..2b731b253598e9f94da434cdd322cfd197b0cb74 100644 (file)
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
  * @to:                destination FP index
  *
  * Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then mem copying the entire
+ * source onto the target
  */
 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
        struct bnx2x_fastpath *from_fp = &bp->fp[from];
        struct bnx2x_fastpath *to_fp = &bp->fp[to];
-       struct napi_struct orig_napi = to_fp->napi;
+
+       /* Copy the NAPI object as it has been already initialized */
+       from_fp->napi = to_fp->napi;
+
        /* Move bnx2x_fastpath contents */
        memcpy(to_fp, from_fp, sizeof(*to_fp));
        to_fp->index = to;
-
-       /* Restore the NAPI object as it has been already initialized */
-       to_fp->napi = orig_napi;
 }
 
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
  * return idx of last bd freed
  */
 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
-                            u16 idx)
+                            u16 idx, unsigned int *pkts_compl,
+                            unsigned int *bytes_compl)
 {
        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 
        /* release skb */
        WARN_ON(!skb);
+       if (skb) {
+               (*pkts_compl)++;
+               (*bytes_compl) += skb->len;
+       }
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 {
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
                                      " pkt_cons %u\n",
                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 
-               bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+               bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+                   &pkts_compl, &bytes_compl);
+
                sw_cons++;
        }
 
+       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
        txdata->tx_pkt_cons = sw_cons;
        txdata->tx_bd_cons = bd_cons;
 
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
           fp->last_max_sge, fp->rx_sge_prod);
 }
 
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+                           const struct eth_fast_path_rx_cqe *cqe)
+{
+       /* Set Toeplitz hash from CQE */
+       if ((bp->dev->features & NETIF_F_RXHASH) &&
+           (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+               return le32_to_cpu(cqe->rss_hash_result);
+       return 0;
+}
+
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-                           struct sk_buff *skb, u16 cons, u16 prod,
+                           u16 cons, u16 prod,
                            struct eth_fast_path_rx_cqe *cqe)
 {
        struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-       /* Try to map an empty skb from the aggregation info  */
+       /* Try to map an empty data buffer from the aggregation info  */
        mapping = dma_map_single(&bp->pdev->dev,
-                                first_buf->skb->data,
+                                first_buf->data + NET_SKB_PAD,
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
        /*
         *  ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                /* Move the BD from the consumer to the producer */
-               bnx2x_reuse_rx_skb(fp, cons, prod);
+               bnx2x_reuse_rx_data(fp, cons, prod);
                tpa_info->tpa_state = BNX2X_TPA_ERROR;
                return;
        }
 
-       /* move empty skb from pool to prod */
-       prod_rx_buf->skb = first_buf->skb;
+       /* move empty data from pool to prod */
+       prod_rx_buf->data = first_buf->data;
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-       /* point prod_bd to new skb */
+       /* point prod_bd to new data */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        tpa_info->tpa_state = BNX2X_TPA_START;
        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
        tpa_info->placement_offset = cqe->placement_offset;
+       tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
 
 #ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 {
        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
-       u8 pad = tpa_info->placement_offset;
+       u32 pad = tpa_info->placement_offset;
        u16 len = tpa_info->len_on_bd;
-       struct sk_buff *skb = rx_buf->skb;
+       struct sk_buff *skb = NULL;
+       u8 *data = rx_buf->data;
        /* alloc new skb */
-       struct sk_buff *new_skb;
+       u8 *new_data;
        u8 old_tpa_state = tpa_info->tpa_state;
 
        tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (old_tpa_state == BNX2X_TPA_ERROR)
                goto drop;
 
-       /* Try to allocate the new skb */
-       new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+       /* Try to allocate the new data */
+       new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
 
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         fp->rx_buf_size, DMA_FROM_DEVICE);
+       if (likely(new_data))
+               skb = build_skb(data);
 
-       if (likely(new_skb)) {
-               prefetch(skb);
-               prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+       if (likely(skb)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 #endif
 
-               skb_reserve(skb, pad);
+               skb_reserve(skb, pad + NET_SKB_PAD);
                skb_put(skb, len);
+               skb->rxhash = tpa_info->rxhash;
 
                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
 
-               /* put new skb in bin */
-               rx_buf->skb = new_skb;
+               /* put new data in bin */
+               rx_buf->data = new_data;
 
                return;
        }
@@ -537,19 +565,6 @@ drop:
        fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-                                       struct sk_buff *skb)
-{
-       /* Set Toeplitz hash from CQE */
-       if ((bp->dev->features & NETIF_F_RXHASH) &&
-           (cqe->fast_path_cqe.status_flags &
-            ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-               skb->rxhash =
-               le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                u8 cqe_fp_flags;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad;
+               u8 *data;
 
 #ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic))
@@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);
 
-               /* Prefetch the page containing the BD descriptor
-                  at producer's index. It will be needed when new skb is
-                  allocated */
-               prefetch((void *)(PAGE_ALIGN((unsigned long)
-                                            (&fp->rx_desc_ring[bd_prod])) -
-                                 PAGE_SIZE + 1));
-
                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp = &cqe->fast_path_cqe;
                cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;
+               }
+               rx_buf = &fp->rx_buf_ring[bd_cons];
+               data = rx_buf->data;
 
-               /* this is an rx packet */
-               } else {
-                       rx_buf = &fp->rx_buf_ring[bd_cons];
-                       skb = rx_buf->skb;
-                       prefetch(skb);
-
-                       if (!CQE_TYPE_FAST(cqe_fp_type)) {
+               if (!CQE_TYPE_FAST(cqe_fp_type)) {
 #ifdef BNX2X_STOP_ON_ERROR
-                               /* sanity check */
-                               if (fp->disable_tpa &&
-                                   (CQE_TYPE_START(cqe_fp_type) ||
-                                    CQE_TYPE_STOP(cqe_fp_type)))
-                                       BNX2X_ERR("START/STOP packet while "
-                                                 "disable_tpa type %x\n",
-                                                 CQE_TYPE(cqe_fp_type));
+                       /* sanity check */
+                       if (fp->disable_tpa &&
+                           (CQE_TYPE_START(cqe_fp_type) ||
+                            CQE_TYPE_STOP(cqe_fp_type)))
+                               BNX2X_ERR("START/STOP packet while "
+                                         "disable_tpa type %x\n",
+                                         CQE_TYPE(cqe_fp_type));
 #endif
 
-                               if (CQE_TYPE_START(cqe_fp_type)) {
-                                       u16 queue = cqe_fp->queue_index;
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_start on queue %d\n",
-                                          queue);
+                       if (CQE_TYPE_START(cqe_fp_type)) {
+                               u16 queue = cqe_fp->queue_index;
+                               DP(NETIF_MSG_RX_STATUS,
+                                  "calling tpa_start on queue %d\n",
+                                  queue);
 
-                                       bnx2x_tpa_start(fp, queue, skb,
-                                                       bd_cons, bd_prod,
-                                                       cqe_fp);
-
-                                       /* Set Toeplitz hash for LRO skb */
-                                       bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-                                       goto next_rx;
-
-                               } else {
-                                       u16 queue =
-                                               cqe->end_agg_cqe.queue_index;
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_stop on queue %d\n",
-                                          queue);
-
-                                       bnx2x_tpa_stop(bp, fp, queue,
-                                                      &cqe->end_agg_cqe,
-                                                      comp_ring_cons);
+                               bnx2x_tpa_start(fp, queue,
+                                               bd_cons, bd_prod,
+                                               cqe_fp);
+                               goto next_rx;
+                       } else {
+                               u16 queue =
+                                       cqe->end_agg_cqe.queue_index;
+                               DP(NETIF_MSG_RX_STATUS,
+                                  "calling tpa_stop on queue %d\n",
+                                  queue);
+
+                               bnx2x_tpa_stop(bp, fp, queue,
+                                              &cqe->end_agg_cqe,
+                                              comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
-                                       if (bp->panic)
-                                               return 0;
+                               if (bp->panic)
+                                       return 0;
 #endif
 
-                                       bnx2x_update_sge_prod(fp, cqe_fp);
-                                       goto next_cqe;
-                               }
+                               bnx2x_update_sge_prod(fp, cqe_fp);
+                               goto next_cqe;
                        }
-                       /* non TPA */
-                       len = le16_to_cpu(cqe_fp->pkt_len);
-                       pad = cqe_fp->placement_offset;
-                       dma_sync_single_for_cpu(&bp->pdev->dev,
+               }
+               /* non TPA */
+               len = le16_to_cpu(cqe_fp->pkt_len);
+               pad = cqe_fp->placement_offset;
+               dma_sync_single_for_cpu(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
-                                                      pad + RX_COPY_THRESH,
-                                                      DMA_FROM_DEVICE);
-                       prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+                                       pad + RX_COPY_THRESH,
+                                       DMA_FROM_DEVICE);
+               pad += NET_SKB_PAD;
+               prefetch(data + pad); /* speedup eth_type_trans() */
+               /* is this an error packet? */
+               if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+                       DP(NETIF_MSG_RX_ERR,
+                          "ERROR  flags %x  rx packet %u\n",
+                          cqe_fp_flags, sw_comp_cons);
+                       fp->eth_q_stats.rx_err_discard_pkt++;
+                       goto reuse_rx;
+               }
 
-                       /* is this an error packet? */
-                       if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+               /* Since we don't have a jumbo ring
+                * copy small packets if mtu > 1500
+                */
+               if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+                   (len <= RX_COPY_THRESH)) {
+                       skb = netdev_alloc_skb_ip_align(bp->dev, len);
+                       if (skb == NULL) {
                                DP(NETIF_MSG_RX_ERR,
-                                  "ERROR  flags %x  rx packet %u\n",
-                                  cqe_fp_flags, sw_comp_cons);
-                               fp->eth_q_stats.rx_err_discard_pkt++;
+                                  "ERROR  packet dropped because of alloc failure\n");
+                               fp->eth_q_stats.rx_skb_alloc_failed++;
                                goto reuse_rx;
                        }
-
-                       /* Since we don't have a jumbo ring
-                        * copy small packets if mtu > 1500
-                        */
-                       if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-                           (len <= RX_COPY_THRESH)) {
-                               struct sk_buff *new_skb;
-
-                               new_skb = netdev_alloc_skb(bp->dev, len + pad);
-                               if (new_skb == NULL) {
-                                       DP(NETIF_MSG_RX_ERR,
-                                          "ERROR  packet dropped "
-                                          "because of alloc failure\n");
-                                       fp->eth_q_stats.rx_skb_alloc_failed++;
-                                       goto reuse_rx;
-                               }
-
-                               /* aligned copy */
-                               skb_copy_from_linear_data_offset(skb, pad,
-                                                   new_skb->data + pad, len);
-                               skb_reserve(new_skb, pad);
-                               skb_put(new_skb, len);
-
-                               bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
-                               skb = new_skb;
-
-                       } else
-                       if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+                       memcpy(skb->data, data + pad, len);
+                       bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+               } else {
+                       if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
-                                       dma_unmap_addr(rx_buf, mapping),
+                                                dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
+                               skb = build_skb(data);
+                               if (unlikely(!skb)) {
+                                       kfree(data);
+                                       fp->eth_q_stats.rx_skb_alloc_failed++;
+                                       goto next_rx;
+                               }
                                skb_reserve(skb, pad);
-                               skb_put(skb, len);
-
                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR  packet dropped because "
                                   "of alloc failure\n");
                                fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
-                               bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+                               bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                                goto next_rx;
                        }
+               }
 
-                       skb->protocol = eth_type_trans(skb, bp->dev);
+               skb_put(skb, len);
+               skb->protocol = eth_type_trans(skb, bp->dev);
 
-                       /* Set Toeplitz hash for a none-LRO skb */
-                       bnx2x_set_skb_rxhash(bp, cqe, skb);
+               /* Set Toeplitz hash for a none-LRO skb */
+               skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
 
-                       skb_checksum_none_assert(skb);
+               skb_checksum_none_assert(skb);
 
-                       if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM) {
 
-                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               else
-                                       fp->eth_q_stats.hw_csum_err++;
-                       }
+                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               fp->eth_q_stats.hw_csum_err++;
                }
 
-               skb_record_rx_queue(skb, fp->index);
+               skb_record_rx_queue(skb, fp->rx_queue);
 
                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@ reuse_rx:
 
 
 next_rx:
-               rx_buf->skb = NULL;
+               rx_buf->data = NULL;
 
                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                struct sw_rx_bd *first_buf =
                                        &tpa_info->first_buf;
 
-                               first_buf->skb = netdev_alloc_skb(bp->dev,
-                                                      fp->rx_buf_size);
-                               if (!first_buf->skb) {
+                               first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+                                                         GFP_ATOMIC);
+                               if (!first_buf->data) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
                                                  "disabling TPA on this "
@@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
                struct bnx2x_fastpath *fp = &bp->fp[i];
                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+                       unsigned pkts_compl = 0, bytes_compl = 0;
 
-                       u16 bd_cons = txdata->tx_bd_cons;
                        u16 sw_prod = txdata->tx_pkt_prod;
                        u16 sw_cons = txdata->tx_pkt_cons;
 
                        while (sw_cons != sw_prod) {
-                               bd_cons = bnx2x_free_tx_pkt(bp, txdata,
-                                                           TX_BD(sw_cons));
+                               bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+                                   &pkts_compl, &bytes_compl);
                                sw_cons++;
                        }
+                       netdev_tx_reset_queue(
+                           netdev_get_tx_queue(bp->dev, txdata->txq_index));
                }
        }
 }
@@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 
        for (i = 0; i < NUM_RX_BD; i++) {
                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-               struct sk_buff *skb = rx_buf->skb;
+               u8 *data = rx_buf->data;
 
-               if (skb == NULL)
+               if (data == NULL)
                        continue;
                dma_unmap_single(&bp->pdev->dev,
                                 dma_unmap_addr(rx_buf, mapping),
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 
-               rx_buf->skb = NULL;
-               dev_kfree_skb(skb);
+               rx_buf->data = NULL;
+               kfree(data);
        }
 }
 
@@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
                break;
        }
 
+#ifdef BCM_CNIC
+       /* override in ISCSI SD mod */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->num_queues = 1;
+#endif
        /* Add special queues */
        bp->num_queues += NON_ETH_CONTEXT_USE;
 }
@@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
+               u32 mtu;
 
                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
                if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
                         * overrun attack.
                         */
-                       fp->rx_buf_size =
-                               BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-                               BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+                       mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
                else
-                       fp->rx_buf_size =
-                               bp->dev->mtu + ETH_OVREHEAD +
-                               BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+                       mtu = bp->dev->mtu;
+               fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+                                 IP_HEADER_ALIGNMENT_PADDING +
+                                 ETH_OVREHEAD +
+                                 mtu +
+                                 BNX2X_FW_RX_ALIGN_END;
+               /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
        }
 }
 
@@ -1541,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
        if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
                for (i = 0; i < sizeof(ind_table); i++)
                        ind_table[i] =
-                               bp->fp->cl_id + (i % num_eth_queues);
+                               bp->fp->cl_id +
+                               ethtool_rxfh_indir_default(i, num_eth_queues);
        }
 
        /*
@@ -1929,13 +1934,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                break;
        }
 
-       if (!bp->port.pmf)
+       if (bp->port.pmf)
+               bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+       else
                bnx2x__link_status_update(bp);
 
        /* start the timer */
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 #ifdef BCM_CNIC
+       /* re-read iscsi info */
+       bnx2x_get_iscsi_info(bp);
        bnx2x_setup_cnic_irq_info(bp);
        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2808,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+                       unsigned int pkts_compl = 0, bytes_compl = 0;
 
                        DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
                                                "dropping packet...\n");
@@ -2810,7 +2820,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         */
                        first_bd->nbd = cpu_to_le16(nbd);
                        bnx2x_free_tx_pkt(bp, txdata,
-                                         TX_BD(txdata->tx_pkt_prod));
+                                         TX_BD(txdata->tx_pkt_prod),
+                                         &pkts_compl, &bytes_compl);
                        return NETDEV_TX_OK;
                }
 
@@ -2871,6 +2882,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                   pbd_e2->parsing_data);
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        txdata->tx_pkt_prod++;
        /*
         * Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2994,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;
 
-       if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+       if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
                return -EINVAL;
 
+#ifdef BCM_CNIC
+       if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+               return -EINVAL;
+#endif
+
        if (netif_running(dev))  {
                rc = bnx2x_set_eth_mac(bp, false);
                if (rc)
@@ -3098,7 +3116,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        u8 cos;
        int rx_ring_size = 0;
 
-       /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+       if (IS_MF_ISCSI_SD(bp)) {
+               rx_ring_size = MIN_RX_SIZE_NONTPA;
+               bp->rx_ring_size = rx_ring_size;
+       } else
+#endif
        if (!bp->rx_ring_size) {
 
                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3131,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
                                     MIN_RX_SIZE_TPA, rx_ring_size);
 
                bp->rx_ring_size = rx_ring_size;
-       } else
+       } else /* if rx_ring_size specified - use it */
                rx_ring_size = bp->rx_ring_size;
 
        /* Common */
@@ -3278,14 +3301,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
        msix_table_size = bp->igu_sb_cnt + 1;
 
        /* fp array: RSS plus CNIC related L2 queues */
-       fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+       fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
                     sizeof(*fp), GFP_KERNEL);
        if (!fp)
                goto alloc_err;
        bp->fp = fp;
 
        /* msix table */
-       tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+       tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
                goto alloc_err;
        bp->msix_table = tbl;
@@ -3409,7 +3432,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
        return bnx2x_reload_if_running(dev);
 }
 
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -3420,7 +3444,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 flags = bp->flags;
index 283d663da18022b848b04b976186140d436eac78..bf27c54ff2e0d5ac093e7f59f3db75f89715bdc4 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 
 #include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
  */
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
 #endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+       netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
 
 /**
  * bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
-       memset(fp->sge_mask, 0xff,
-              (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+       memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
 
        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
        return 0;
 }
 
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-                                    struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+                                     struct bnx2x_fastpath *fp, u16 index)
 {
-       struct sk_buff *skb;
+       u8 *data;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;
 
-       skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-       if (unlikely(skb == NULL))
+       data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+       if (unlikely(data == NULL))
                return -ENOMEM;
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+       mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+                                fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               dev_kfree_skb_any(skb);
+               kfree(data);
                return -ENOMEM;
        }
 
-       rx_buf->skb = skb;
+       rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
        return 0;
 }
 
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
  * so there is no need to check for dma_mapping_error().
  */
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                      u16 cons, u16 prod)
 {
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
-       prod_rx_buf->skb = cons_rx_buf->skb;
+       prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
 }
 
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
        for (i = 0; i < last; i++) {
                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-               struct sk_buff *skb = first_buf->skb;
+               u8 *data = first_buf->data;
 
-               if (skb == NULL) {
+               if (data == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(first_buf, mapping),
                                         fp->rx_buf_size, DMA_FROM_DEVICE);
-               dev_kfree_skb(skb);
-               first_buf->skb = NULL;
+               kfree(data);
+               first_buf->data = NULL;
        }
 }
 
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
         * fp->eth_q_stats.rx_skb_alloc_failed = 0
         */
        for (i = 0; i < rx_ring_size; i++) {
-               if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+               if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        continue;
                }
@@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;
 
+       bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
@@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
        return max_cfg;
 }
 
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp:                driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+       return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp:                driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+       int func;
+       int vn;
+
+       /* Set the attention towards other drivers on the same port */
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               if (vn == BP_VN(bp))
+                       continue;
+
+               func = func_by_vn(bp, vn);
+               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+                      (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+       }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp:                driver handle
+ * @flags:     flags to update
+ * @set:       set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+       if (SHMEM2_HAS(bp, drv_flags)) {
+               u32 drv_flags;
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+               drv_flags = SHMEM2_RD(bp, drv_flags);
+
+               if (set)
+                       SET_FLAGS(drv_flags, flags);
+               else
+                       RESET_FLAGS(drv_flags, flags);
+
+               SHMEM2_WR(bp, drv_flags, drv_flags);
+               DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+               bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+       }
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+       if (is_valid_ether_addr(addr))
+               return true;
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+               return true;
+#endif
+       return false;
+}
+
 #endif /* BNX2X_CMN_H */
index 51bd7485ab189f11f3c9b3aa4b7e0c59557a987c..5051cf3deb2090bbeae422b3e588e46b8753788c 100644 (file)
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 }
 #endif
 
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
-       if (SHMEM2_HAS(bp, drv_flags)) {
-               u32 drv_flags;
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-               drv_flags = SHMEM2_RD(bp, drv_flags);
-
-               if (set)
-                       SET_FLAGS(drv_flags, flags);
-               else
-                       RESET_FLAGS(drv_flags, flags);
-
-               SHMEM2_WR(bp, drv_flags, drv_flags);
-               DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
-               bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-       }
-}
-
 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 {
        u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        /* mark DCBX result for PMF migration */
                        bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
-                       /**
+                       /*
                         * Add new app tlvs to dcbnl
                         */
                        bnx2x_dcbnl_update_applist(bp, false);
 #endif
-                       bnx2x_dcbx_stop_hw_tx(bp);
-
-                       /* reconfigure the netdevice with the results of the new
+                       /*
+                        * reconfigure the netdevice with the results of the new
                         * dcbx negotiation.
                         */
                        bnx2x_dcbx_update_tc_mapping(bp);
 
+                       /*
+                        * allow other funtions to update their netdevices
+                        * accordingly
+                        */
+                       if (IS_MF(bp))
+                               bnx2x_link_sync_notify(bp);
+
+                       bnx2x_dcbx_stop_hw_tx(bp);
+
                        return;
                }
        case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 
                bnx2x_dcbx_update_ets_params(bp);
                bnx2x_dcbx_resume_hw_tx(bp);
+
                return;
        case BNX2X_DCBX_STATE_TX_RELEASED:
                DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
                /*For IEEE admin_recommendation_bw_precentage
                 *For IEEE admin_recommendation_ets_pg */
                af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
                        if (dp->admin_priority_app_table[i].valid) {
                                struct bnx2x_admin_priority_app_table *table =
                                        dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+       if (!CHIP_IS_E1x(bp)) {
                bp->dcb_state = dcb_on;
                bp->dcbx_enabled = dcbx_enabled;
        } else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
        /* if we need to syncronize DCBX result from prev PMF
-        * read it from shmem and update bp accordingly
+        * read it from shmem and update bp and netdev accordingly
         */
        if (SHMEM2_HAS(bp, drv_flags) &&
           GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
                                          bp->dcbx_error);
                bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
                                         bp->dcbx_error);
+#ifdef BCM_DCBNL
+               /*
+                * Add new app tlvs to dcbnl
+                */
+               bnx2x_dcbnl_update_applist(bp, false);
+               /*
+                * Send a notification for the new negotiated parameters
+                */
+               dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+               /*
+                * reconfigure the netdevice with the results of the new
+                * dcbx negotiation.
+                */
+               bnx2x_dcbx_update_tc_mapping(bp);
+
        }
 }
 
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
        int i, ff;
 
        /* iterate over the app entries looking for idtype and idval */
-       for (i = 0, ff = -1; i < 4; i++) {
+       for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
                struct bnx2x_admin_priority_app_table *app_ent =
                        &bp->dcbx_config_params.admin_priority_app_table[i];
                if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
                if (ff < 0 && !app_ent->valid)
                        ff = i;
        }
-       if (i < 4)
+       if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
                /* if found overwrite up */
                bp->dcbx_config_params.
                        admin_priority_app_table[i].priority = up;
index 2c6a3bca6f284cb82674fb73da8a41cf04325c39..2ab9254e2d5eff4b7cf9ccb5a02cc38df66a9f06 100644 (file)
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
                u32 app_id;
 };
 
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
 struct bnx2x_config_dcbx_params {
        u32 overwrite_settings;
        u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
        u32 admin_recommendation_bw_precentage[8];
        u32 admin_recommendation_ets_pg[8];
        u32 admin_pfc_bitmap;
-       struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+       struct bnx2x_admin_priority_app_table
+               admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
        u32 admin_default_priority;
 };
 
index f0ca8b27a55eea7b96f3984b03aaead457ba3db8..a688b9d975a2576f5285417d3a23ed53a5c3fb5b 100644 (file)
@@ -107,6 +107,10 @@ static const struct {
                                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
        { STATS_OFFSET32(mf_tag_discard),
                                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+       { STATS_OFFSET32(pfc_frames_received_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_received" },
+       { STATS_OFFSET32(pfc_frames_sent_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_sent" },
        { STATS_OFFSET32(brb_drop_hi),
                                8, STATS_FLAGS_PORT, "rx_brb_discard" },
        { STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                DP(NETIF_MSG_LINK, "Unsupported port type\n");
                return -EINVAL;
        }
-       /* Save new config in case command complete successuly */
+       /* Save new config in case command complete successully */
        new_multi_phy_config = bp->link_params.multi_phy_config;
        /* Get the new cfg_idx */
        cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -761,8 +765,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
        struct bnx2x *bp = netdev_priv(dev);
        u8 phy_fw_ver[PHY_FW_VER_LEN];
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 
        phy_fw_ver[0] = '\0';
        if (bp->port.pmf) {
@@ -773,14 +777,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
                bnx2x_release_phy_lock(bp);
        }
 
-       strncpy(info->fw_version, bp->fw_ver, 32);
+       strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
        snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
                 "bc %d.%d.%d%s%s",
                 (bp->common.bc_ver & 0xff0000) >> 16,
                 (bp->common.bc_ver & 0xff00) >> 8,
                 (bp->common.bc_ver & 0xff),
                 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-       strcpy(info->bus_info, pci_name(bp->pdev));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
        info->n_stats = BNX2X_NUM_STATS;
        info->testinfo_len = BNX2X_NUM_TESTS;
        info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1744,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        struct sw_rx_bd *rx_buf;
        u16 len;
        int rc = -ENODEV;
+       u8 *data;
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 
        /* check the loopback mode */
        switch (loopback_mode) {
@@ -1748,8 +1754,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
                        return -EINVAL;
                break;
        case BNX2X_MAC_LOOPBACK:
-               bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
-                                               LOOPBACK_XMAC : LOOPBACK_BMAC;
+               if (CHIP_IS_E3(bp)) {
+                       int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+                       if (bp->port.supported[cfg_idx] &
+                           (SUPPORTED_10000baseT_Full |
+                            SUPPORTED_20000baseMLD2_Full |
+                            SUPPORTED_20000baseKR2_Full))
+                               bp->link_params.loopback_mode = LOOPBACK_XMAC;
+                       else
+                               bp->link_params.loopback_mode = LOOPBACK_UMAC;
+               } else
+                       bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                break;
        default:
@@ -1784,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
        rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        pkt_prod = txdata->tx_pkt_prod++;
        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
        tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1883,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        dma_sync_single_for_cpu(&bp->pdev->dev,
                                   dma_unmap_addr(rx_buf, mapping),
                                   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
-       skb = rx_buf->skb;
-       skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+       data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
        for (i = ETH_HLEN; i < pkt_size; i++)
-               if (*(skb->data + i) != (unsigned char) (i & 0xff))
+               if (*(data + i) != (unsigned char) (i & 0xff))
                        goto test_loopback_rx_exit;
 
        rc = 0;
@@ -2285,18 +2302,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
        }
 }
 
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
-                               struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+               0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       size_t copy_size =
-               min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
        u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
        size_t i;
 
-       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-               return -EOPNOTSUPP;
-
        /* Get the current configuration of the RSS indirection table */
        bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
 
@@ -2309,33 +2328,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
         * align the returned table to the Client ID of the leading RSS
         * queue.
         */
-       for (i = 0; i < copy_size; i++)
-               indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
-       indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+       for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+               indir[i] = ind_table[i] - bp->fp->cl_id;
 
        return 0;
 }
 
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
-                               const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
 {
        struct bnx2x *bp = netdev_priv(dev);
        size_t i;
        u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
-       u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
-       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-               return -EOPNOTSUPP;
-
-       /* validate the size */
-       if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
-               return -EINVAL;
 
        for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
-               /* validate the indices */
-               if (indir->ring_index[i] >= num_eth_queues)
-                       return -EINVAL;
                /*
                 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
                 * as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2350,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
                 * align the received table to the Client ID of the leading RSS
                 * queue
                 */
-               ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+               ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
        return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2383,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .set_phys_id            = bnx2x_set_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
        .get_rxnfc              = bnx2x_get_rxnfc,
+       .get_rxfh_indir_size    = bnx2x_get_rxfh_indir_size,
        .get_rxfh_indir         = bnx2x_get_rxfh_indir,
        .set_rxfh_indir         = bnx2x_set_rxfh_indir,
 };
index fc754cb6cc0fbf95967f40ba37ae4aca0b50b049..3e30c8642c2658bfe7b928294971d7a0cc6cb2d6 100644 (file)
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
        #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
        #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+       #define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
        #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
        #define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
        #define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+       #define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
+       #define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
        #define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
        #define REQ_BC_VER_4_SET_MF_BW                  0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
        #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
        #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
        #define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+       #define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
+       #define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
        #define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
        #define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
 
        #define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
        #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+       #define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
        u32 virt_mac_upper;
        #define VIRT_MAC_SIGN_MASK                      0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
        u32 extended_dev_info_shared_addr;
        u32 ncsi_oem_data_addr;
 
-       u32 ocsd_host_addr;
-       u32 ocbb_host_addr;
-       u32 ocsd_req_update_interval;
+       u32 ocsd_host_addr; /* initialized by option ROM */
+       u32 ocbb_host_addr; /* initialized by option ROM */
+       u32 ocsd_req_update_interval; /* initialized by option ROM */
+       u32 temperature_in_half_celsius;
+       u32 glob_struct_in_host;
+
+       u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE                    0x00000000
+
+       u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+
+       u32 extended_dev_info_shared_cfg_size;
+
+       u32 dcbx_en[PORT_MAX];
+
+       /* The offset points to the multi threaded meta structure */
+       u32 multi_thread_data_offset;
+
+       /* address of DMAable host address holding values from the drivers */
+       u32 drv_info_host_addr_lo;
+       u32 drv_info_host_addr_hi;
+
+       /* general values written by the MFW (such as current version) */
+       u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK          0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT         0
+#define DRV_INFO_CONTROL_OP_CODE_MASK      0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT     8
 };
 
 
@@ -2501,14 +2536,18 @@ struct mac_stx {
 #define MAC_STX_IDX_MAX                     2
 
 struct host_port_stats {
-       u32            host_port_stats_start;
+       u32            host_port_stats_counter;
 
        struct mac_stx mac_stx[MAC_STX_IDX_MAX];
 
        u32            brb_drop_hi;
        u32            brb_drop_lo;
 
-       u32            host_port_stats_end;
+       u32            not_used; /* obsolete */
+       u32            pfc_frames_tx_hi;
+       u32            pfc_frames_tx_lo;
+       u32            pfc_frames_rx_hi;
+       u32            pfc_frames_rx_lo;
 };
 
 
@@ -2548,6 +2587,118 @@ struct host_func_stats {
 /* VIC definitions */
 #define VICSTATST_UIF_INDEX 2
 
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+       ETH_STATS_OPCODE,
+       FCOE_STATS_OPCODE,
+       ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN      12
+/*  Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+       /* Function's Driver Version. padded to 12 */
+       u8 version[ETH_STAT_INFO_VERSION_LEN];
+       /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+       u8 mac_local[8];
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       u32 mtu_size;           /* MTU Size. Note   : Negotiated MTU */
+       u32 feature_flags;      /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK                0x01
+#define FEATURE_ETH_LSO_MASK                   0x02
+#define FEATURE_ETH_BOOTMODE_MASK              0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT             2
+#define FEATURE_ETH_BOOTMODE_NONE              (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE               (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI             (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE              (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK                   0x20
+       u32 lso_max_size;       /* LSO MaxOffloadSize. */
+       u32 lso_min_seg_cnt;    /* LSO MinSegmentCount. */
+       /* Num Offloaded Connections TCP_IPv4. */
+       u32 ipv4_ofld_cnt;
+       /* Num Offloaded Connections TCP_IPv6. */
+       u32 ipv6_ofld_cnt;
+       u32 promiscuous_mode;   /* Promiscuous Mode. non-zero true */
+       u32 txq_size;           /* TX Descriptors Queue Size */
+       u32 rxq_size;           /* RX Descriptors Queue Size */
+       /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 txq_avg_depth;
+       /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 rxq_avg_depth;
+       /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+       u32 iov_offload;
+       /* Number of NetQueue/VMQ Config'd. */
+       u32 netq_cnt;
+       u32 vf_cnt;             /* Num VF assigned to this PF. */
+};
+
+/*  Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u32 txq_size;           /* FCoE TX Descriptors Queue Size. */
+       u32 rxq_size;           /* FCoE RX Descriptors Queue Size. */
+       /* FCoE TX Descriptor Queue Avg Depth. */
+       u32 txq_avg_depth;
+       /* FCoE RX Descriptors Queue Avg Depth. */
+       u32 rxq_avg_depth;
+       u32 rx_frames_lo;       /* FCoE RX Frames received. */
+       u32 rx_frames_hi;       /* FCoE RX Frames received. */
+       u32 rx_bytes_lo;        /* FCoE RX Bytes received. */
+       u32 rx_bytes_hi;        /* FCoE RX Bytes received. */
+       u32 tx_frames_lo;       /* FCoE TX Frames sent. */
+       u32 tx_frames_hi;       /* FCoE TX Frames sent. */
+       u32 tx_bytes_lo;        /* FCoE TX Bytes sent. */
+       u32 tx_bytes_hi;        /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI  Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin iSCSI MAC Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u8 initiator_name[64];  /* iSCSI Boot Initiator Node name. */
+       u8 ww_port_name[64];    /* iSCSI World wide port name */
+       u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+       u8 boot_target_ip[16];  /* iSCSI Boot Target IP. */
+       u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+       u8 boot_init_ip[16];    /* iSCSI Boot Initiator IP Address. */
+       u32 max_frame_size;     /* Max Frame Size. bytes */
+       u32 txq_size;           /* PDU TX Descriptors Queue Size. */
+       u32 rxq_size;           /* PDU RX Descriptors Queue Size. */
+       u32 txq_avg_depth;      /* PDU TX Descriptor Queue Avg Depth. */
+       u32 rxq_avg_depth;      /* PDU RX Descriptors Queue Avg Depth. */
+       u32 rx_pdus_lo;         /* iSCSI PDUs received. */
+       u32 rx_pdus_hi;         /* iSCSI PDUs received. */
+       u32 rx_bytes_lo;        /* iSCSI RX Bytes received. */
+       u32 rx_bytes_hi;        /* iSCSI RX Bytes received. */
+       u32 tx_pdus_lo;         /* iSCSI PDUs sent. */
+       u32 tx_pdus_hi;         /* iSCSI PDUs sent. */
+       u32 tx_bytes_lo;        /* iSCSI PDU TX Bytes sent. */
+       u32 tx_bytes_hi;        /* iSCSI PDU TX Bytes sent. */
+       u32 pcp_prior_map_tbl;  /* C-PCP to S-PCP Priority MapTable.
+                                * 9 nibbles, the position of each nibble
+                                * represents the C-PCP value, the value
+                                * of the nibble = S-PCP value.
+                                */
+};
+
+union drv_info_to_mcp {
+       struct eth_stats_info   ether_stat;
+       struct fcoe_stats_info  fcoe_stat;
+       struct iscsi_stats_info iscsi_stat;
+};
 #define BCM_5710_FW_MAJOR_VERSION                      7
 #define BCM_5710_FW_MINOR_VERSION                      0
 #define BCM_5710_FW_REVISION_VERSION           29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
 
 
 /*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
  */
+struct fcoe_rx_stat_params_section0 {
+       __le32 fcoe_rx_pkt_cnt;
+       __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+       __le32 fcoe_ver_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+       __le32 fc_crc_cnt;
+       __le32 eofa_del_cnt;
+       __le32 miss_frame_cnt;
+       __le32 seq_timeout_cnt;
+       __le32 drop_seq_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+       __le32 fcp_rx_pkt_cnt;
+       __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+       __le32 fcoe_tx_pkt_cnt;
+       __le32 fcoe_tx_byte_cnt;
+       __le32 fcp_tx_pkt_cnt;
+       __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+       struct fcoe_tx_stat_params tx_stat;
+       struct fcoe_rx_stat_params_section0 rx_stat0;
+       struct fcoe_rx_stat_params_section1 rx_stat1;
+       struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+*/
 struct cfc_del_event_data {
        u32 cid;
        u32 reserved0;
index bce203fa4b9e274c24e06e52b133a0936328c4e5..4df9505b67b62e1d482460cc83fcfb6999a83746 100644 (file)
@@ -27,7 +27,6 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 
-
 /********************************************************/
 #define ETH_HLEN                       14
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
 
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR     170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR              250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR              10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR               50
 
 /* BRB thresholds for E2*/
 #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE            170
 #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE                      50
 #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE          250
 
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR   290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR    410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR    10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR     50
+
 /* BRB thresholds for E3A0 */
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE          290
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE              0
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE            50
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE                410
 
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR   330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR    490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR    15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR     55
 
 /* BRB thresholds for E3B0 2 port mode*/
 #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               1025
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE         50
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE     384
 
-
 /* only for E3B0*/
 #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR                       304
 #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR                        384
-#define PFC_E3B0_4P_LB_GUART                           120
+#define PFC_E3B0_4P_LB_GUART           120
 
 #define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART            120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST               80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST       80
 
 #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART            80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST               120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST       120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR                      330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR                       490
+#define DEFAULT_E3B0_LB_GUART          40
+
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART           40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST      0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART           40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST      0
 
+/* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND                (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
        u32 min_w_val = 0;
        /* Calculate min_w_val.*/
        if (vars->link_up) {
-               if (SPEED_20000 == vars->line_speed)
+               if (vars->line_speed == SPEED_20000)
                        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
                else
                        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
                   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
 
-       if (0 == port) {
+       if (!port) {
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
                        credit_upper_bound);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-       if (0 == port) {
+       if (!port) {
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
        * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
        * port mode port1 has COS0-2 that can be used for WFQ.
        */
-       if (0 == port) {
+       if (!port) {
                base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
        } else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
        * In 2 port mode port0 has COS0-5 that can be used for WFQ.
        * In 4 port mode port1 has COS0-2 that can be used for WFQ.
        */
-       if (0 == port) {
+       if (!port) {
                base_weight = PBF_REG_COS0_WEIGHT_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
        } else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
        const struct link_params *params,
-       const struct bnx2x_ets_params *ets_params,
+       struct bnx2x_ets_params *ets_params,
        u16 *total_bw)
 {
        struct bnx2x *bp = params->bp;
        u8 cos_idx = 0;
+       u8 is_bw_cos_exist = 0;
 
        *total_bw = 0 ;
+
        /* Calculate total BW requested */
        for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-               if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+               if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+                       is_bw_cos_exist = 1;
+                       if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+                               DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
+                                                  "was set to 0\n");
+                               /*
+                                * This is to prevent a state when ramrods
+                                * can't be sent
+                               */
+                               ets_params->cos[cos_idx].params.bw_params.bw
+                                        = 1;
+                       }
                        *total_bw +=
                                ets_params->cos[cos_idx].params.bw_params.bw;
                }
        }
 
        /* Check total BW is valid */
-       if ((100 != *total_bw) || (0 == *total_bw)) {
-               if (0 == *total_bw) {
+       if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+               if (*total_bw == 0) {
                        DP(NETIF_MSG_LINK,
-                          "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+                          "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
                        return -EINVAL;
                }
                DP(NETIF_MSG_LINK,
-                  "bnx2x_ets_E3B0_config toatl BW should be 100\n");
-               /**
-               *   We can handle a case whre the BW isn't 100 this can happen
-               *   if the TC are joined.
-               */
+                  "bnx2x_ets_E3B0_config total BW should be 100\n");
+               /*
+                * We can handle a case whre the BW isn't 100 this can happen
+                * if the TC are joined.
+                */
        }
        return 0;
 }
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
        const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
                DCBX_E3B0_MAX_NUM_COS_PORT0;
 
-       if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+       if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
                                   "parameter There can't be two COS's with "
                                   "the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 
        if (pri > max_num_of_cos) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-                              "parameter Illegal strict priority\n");
+                  "parameter Illegal strict priority\n");
            return -EINVAL;
        }
 
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 
        /* Set all the strict priority first */
        for (i = 0; i < max_num_of_cos; i++) {
-               if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
-                       if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+               if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+                       if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
                                DP(NETIF_MSG_LINK,
                                           "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
                                           "invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
                            sp_pri_to_cos[i], pri_set);
                        pri_bitmask = 1 << sp_pri_to_cos[i];
                        /* COS is used remove it from bitmap.*/
-                       if (0 == (pri_bitmask & cos_bit_to_set)) {
+                       if (!(pri_bitmask & cos_bit_to_set)) {
                                DP(NETIF_MSG_LINK,
                                        "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
                                        "invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 ******************************************************************************/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
                         const struct link_vars *vars,
-                        const struct bnx2x_ets_params *ets_params)
+                        struct bnx2x_ets_params *ets_params)
 {
        struct bnx2x *bp = params->bp;
        int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        /* Prepare BW parameters*/
        bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
                                                   &total_bw);
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK,
                   "bnx2x_ets_E3B0_config get_total_bw failed\n");
                return -EINVAL;
        }
 
-       /**
-        *  Upper bound is set according to current link speed (min_w_val
-        *  should be the same for upper bound and COS credit val).
+       /*
+        * Upper bound is set according to current link speed (min_w_val
+        * should be the same for upper bound and COS credit val).
         */
        bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
        bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
                if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
                        cos_bw_bitmap |= (1 << cos_entry);
-                       /**
+                       /*
                         * The function also sets the BW in HW(not the mappin
                         * yet)
                         */
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
                           "bnx2x_ets_e3b0_config cos state not valid\n");
                        return -EINVAL;
                }
-               if (0 != bnx2x_status) {
+               if (bnx2x_status) {
                        DP(NETIF_MSG_LINK,
                           "bnx2x_ets_e3b0_config set cos bw failed\n");
                        return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
                                                         sp_pri_to_cos);
 
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK,
                   "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
                return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
                                              cos_sp_bitmap,
                                              cos_bw_bitmap);
 
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
                return bnx2x_status;
        }
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 
        DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
 
-       if ((0 == total_bw) ||
-           (0 == cos0_bw) ||
-           (0 == cos1_bw)) {
+       if ((!total_bw) ||
+           (!cos0_bw) ||
+           (!cos1_bw)) {
                DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
                return;
        }
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
         * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
         */
-       val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+       val = (!strict_cos) ? 0x2318 : 0x22E0;
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 
        return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 /******************************************************************/
 /*                     PFC section                               */
 /******************************************************************/
-
 static void bnx2x_update_pfc_xmac(struct link_params *params,
                                  struct link_vars *vars,
                                  u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
        if (!vars->link_up)
                return;
 
-       if (MAC_TYPE_EMAC == vars->mac_type) {
+       if (vars->mac_type == MAC_TYPE_EMAC) {
                DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
                bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
                                        pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 
        udelay(40);
 }
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+       u32 port4mode_ovwr_val;
+       /* Check 4-port override enabled */
+       port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+       if (port4mode_ovwr_val & (1<<0)) {
+               /* Return 4-port mode override value */
+               return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+       }
+       /* Return 4-port mode from input pin */
+       return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
 
 static void bnx2x_emac_init(struct link_params *params,
                            struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
 
 }
 
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
-       u32 port4mode_ovwr_val;
-       /* Check 4-port override enabled */
-       port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
-       if (port4mode_ovwr_val & (1<<0)) {
-               /* Return 4-port mode override value */
-               return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
-       }
-       /* Return 4-port mode from input pin */
-       return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
 /* Define the XMAC mode */
 static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 {
        struct bnx2x *bp = params->bp;
        u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-       /**
-       * In 4-port mode, need to set the mode only once, so if XMAC is
-       * already out of reset, it means the mode has already been set,
-       * and it must not* reset the XMAC again, since it controls both
-       * ports of the path
-       **/
+       /*
+        * In 4-port mode, need to set the mode only once, so if XMAC is
+        * already out of reset, it means the mode has already been set,
+        * and it must not* reset the XMAC again, since it controls both
+        * ports of the path
+        */
 
        if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
            (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
        return 0;
 }
+
 static int bnx2x_emac_enable(struct link_params *params,
                             struct link_vars *vars, u8 lb)
 {
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-
 /* PFC BRB internal port configuration params */
 struct bnx2x_pfc_brb_threshold_val {
        u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
 };
 
 struct bnx2x_pfc_brb_e3b0_val {
+       u32 per_class_guaranty_mode;
+       u32 lb_guarantied_hyst;
        u32 full_lb_xoff_th;
        u32 full_lb_xon_threshold;
        u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
 struct bnx2x_pfc_brb_th_val {
        struct bnx2x_pfc_brb_threshold_val pauseable_th;
        struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+       struct bnx2x_pfc_brb_threshold_val default_class0;
+       struct bnx2x_pfc_brb_threshold_val default_class1;
+
 };
 static int bnx2x_pfc_brb_get_config_params(
                                struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
 {
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+       config_val->default_class1.pause_xoff = 0;
+       config_val->default_class1.pause_xon = 0;
+       config_val->default_class1.full_xoff = 0;
+       config_val->default_class1.full_xon = 0;
+
        if (CHIP_IS_E2(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                       DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                       DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                       DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+               /*  pause able*/
                config_val->pauseable_th.pause_xoff =
-                   PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
-                   PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
                config_val->pauseable_th.full_xoff =
-                   PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
                config_val->pauseable_th.full_xon =
-                   PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
                /* non pause able*/
                config_val->non_pauseable_th.pause_xoff =
-                   PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.pause_xon =
-                   PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xoff =
-                   PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xon =
-                   PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3A0(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                       DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                       DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+               /*  pause able */
                config_val->pauseable_th.pause_xoff =
-                   PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
-                   PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
                config_val->pauseable_th.full_xoff =
-                   PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
                config_val->pauseable_th.full_xon =
-                   PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
                /* non pause able*/
                config_val->non_pauseable_th.pause_xoff =
-                   PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.pause_xon =
-                   PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xoff =
-                   PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xon =
-                   PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3B0(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                   DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                   DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                   DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
                if (params->phy[INT_PHY].flags &
                    FLAGS_4_PORT_MODE) {
                        config_val->pauseable_th.pause_xoff =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                        config_val->pauseable_th.pause_xon =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
                        config_val->pauseable_th.full_xoff =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
                        config_val->pauseable_th.full_xon =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
                        /* non pause able*/
                        config_val->non_pauseable_th.pause_xoff =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                        config_val->non_pauseable_th.pause_xon =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                        config_val->non_pauseable_th.full_xoff =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                        config_val->non_pauseable_th.full_xon =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-           } else {
-               config_val->pauseable_th.pause_xoff =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-           }
+                       PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+               } else {
+                       config_val->pauseable_th.pause_xoff =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       config_val->pauseable_th.pause_xon =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       config_val->pauseable_th.full_xoff =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       config_val->pauseable_th.full_xon =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+                       /* non pause able*/
+                       config_val->non_pauseable_th.pause_xoff =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.pause_xon =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.full_xoff =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.full_xon =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+               }
        } else
            return -EINVAL;
 
        return 0;
 }
 
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
-                                                struct bnx2x_pfc_brb_e3b0_val
-                                                *e3b0_val,
-                                                u32 cos0_pauseable,
-                                                u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+               struct link_params *params,
+               struct bnx2x_pfc_brb_e3b0_val
+               *e3b0_val,
+               struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+               const u8 pfc_enabled)
 {
-       if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+       if (pfc_enabled && pfc_params) {
+               e3b0_val->per_class_guaranty_mode = 1;
+               e3b0_val->lb_guarantied_hyst = 80;
+
+               if (params->phy[INT_PHY].flags &
+                   FLAGS_4_PORT_MODE) {
+                       e3b0_val->full_lb_xoff_th =
+                               PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+                       e3b0_val->full_lb_xon_threshold =
+                               PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+                       e3b0_val->lb_guarantied =
+                               PFC_E3B0_4P_LB_GUART;
+                       e3b0_val->mac_0_class_t_guarantied =
+                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+                       e3b0_val->mac_0_class_t_guarantied_hyst =
+                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       e3b0_val->mac_1_class_t_guarantied =
+                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+                       e3b0_val->mac_1_class_t_guarantied_hyst =
+                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+               } else {
+                       e3b0_val->full_lb_xoff_th =
+                               PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+                       e3b0_val->full_lb_xon_threshold =
+                               PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+                       e3b0_val->mac_0_class_t_guarantied_hyst =
+                               PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       e3b0_val->mac_1_class_t_guarantied =
+                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+                       e3b0_val->mac_1_class_t_guarantied_hyst =
+                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+                       if (pfc_params->cos0_pauseable !=
+                               pfc_params->cos1_pauseable) {
+                               /* nonpauseable= Lossy + pauseable = Lossless*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                              PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+                       } else if (pfc_params->cos0_pauseable) {
+                               /* Lossless +Lossless*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                                  PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+                       } else {
+                               /* Lossy +Lossy*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                              PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+                       }
+               }
+       } else {
+               e3b0_val->per_class_guaranty_mode = 0;
+               e3b0_val->lb_guarantied_hyst = 0;
                e3b0_val->full_lb_xoff_th =
-                   PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+                       DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
                e3b0_val->full_lb_xon_threshold =
-                   PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+                       DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
                e3b0_val->lb_guarantied =
-                   PFC_E3B0_4P_LB_GUART;
+                       DEFAULT_E3B0_LB_GUART;
                e3b0_val->mac_0_class_t_guarantied =
-                   PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
                e3b0_val->mac_0_class_t_guarantied_hyst =
-                   PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
                e3b0_val->mac_1_class_t_guarantied =
-                   PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
                e3b0_val->mac_1_class_t_guarantied_hyst =
-                   PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
-       } else {
-               e3b0_val->full_lb_xoff_th =
-                   PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
-               e3b0_val->full_lb_xon_threshold =
-                   PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
-               e3b0_val->mac_0_class_t_guarantied_hyst =
-                   PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
-               e3b0_val->mac_1_class_t_guarantied =
-                   PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
-               e3b0_val->mac_1_class_t_guarantied_hyst =
-                   PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
-               if (cos0_pauseable != cos1_pauseable) {
-                       /* nonpauseable= Lossy + pauseable = Lossless*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
-               } else if (cos0_pauseable) {
-                       /* Lossless +Lossless*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
-               } else {
-                       /* Lossy +Lossy*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_NON_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
-               }
+                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
        }
 }
 static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
        struct bnx2x *bp = params->bp;
        struct bnx2x_pfc_brb_th_val config_val = { {0} };
        struct bnx2x_pfc_brb_threshold_val *reg_th_config =
-           &config_val.pauseable_th;
+               &config_val.pauseable_th;
        struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
-       int set_pfc = params->feature_config_flags &
+       const int set_pfc = params->feature_config_flags &
                FEATURE_CONFIG_PFC_ENABLED;
+       const u8 pfc_enabled = (set_pfc && pfc_params);
        int bnx2x_status = 0;
        u8 port = params->port;
 
        /* default - pause configuration */
        reg_th_config = &config_val.pauseable_th;
        bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
-       if (0 != bnx2x_status)
+       if (bnx2x_status)
                return bnx2x_status;
 
-       if (set_pfc && pfc_params)
+       if (pfc_enabled) {
                /* First COS */
-               if (!pfc_params->cos0_pauseable)
+               if (pfc_params->cos0_pauseable)
+                       reg_th_config = &config_val.pauseable_th;
+               else
                        reg_th_config = &config_val.non_pauseable_th;
+       } else
+               reg_th_config = &config_val.default_class0;
        /*
         * The number of free blocks below which the pause signal to class 0
         * of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
        REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
               BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
 
-       if (set_pfc && pfc_params) {
+       if (pfc_enabled) {
                /* Second COS */
                if (pfc_params->cos1_pauseable)
                        reg_th_config = &config_val.pauseable_th;
                else
                        reg_th_config = &config_val.non_pauseable_th;
+       } else
+               reg_th_config = &config_val.default_class1;
+       /*
+        * The number of free blocks below which the pause signal to
+        * class 1 of MAC #n is asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+              BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+              reg_th_config->pause_xoff);
+
+       /*
+        * The number of free blocks above which the pause signal to
+        * class 1 of MAC #n is de-asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+              BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+              reg_th_config->pause_xon);
+       /*
+        * The number of free blocks below which the full signal to
+        * class 1 of MAC #n is asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+              BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+              reg_th_config->full_xoff);
+       /*
+        * The number of free blocks above which the full signal to
+        * class 1 of MAC #n is de-asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+              BRB1_REG_FULL_1_XON_THRESHOLD_0,
+              reg_th_config->full_xon);
+
+       if (CHIP_IS_E3B0(bp)) {
+               bnx2x_pfc_brb_get_e3b0_config_params(
+                       params,
+                       &e3b0_val,
+                       pfc_params,
+                       pfc_enabled);
+
+               REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+                          e3b0_val.per_class_guaranty_mode);
+
                /*
-                * The number of free blocks below which the pause signal to
-                * class 1 of MAC #n is asserted. n=0,1
-               **/
-               REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
-                      BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
-                      reg_th_config->pause_xoff);
+                * The hysteresis on the guarantied buffer space for the Lb
+                * port before signaling XON.
+                */
+               REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+                          e3b0_val.lb_guarantied_hyst);
+
                /*
-                * The number of free blocks above which the pause signal to
-                * class 1 of MAC #n is de-asserted. n=0,1
+                * The number of free blocks below which the full signal to the
+                * LB port is asserted.
                 */
-               REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
-                      BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
-                      reg_th_config->pause_xon);
+               REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+                      e3b0_val.full_lb_xoff_th);
                /*
-                * The number of free blocks below which the full signal to
-                * class 1 of MAC #n is asserted. n=0,1
+                * The number of free blocks above which the full signal to the
+                * LB port is de-asserted.
                 */
-               REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
-                      BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
-                      reg_th_config->full_xoff);
+               REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+                      e3b0_val.full_lb_xon_threshold);
                /*
-                * The number of free blocks above which the full signal to
-                * class 1 of MAC #n is de-asserted. n=0,1
+                * The number of blocks guarantied for the MAC #n port. n=0,1
                 */
-               REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
-                      BRB1_REG_FULL_1_XON_THRESHOLD_0,
-                      reg_th_config->full_xon);
 
+               /* The number of blocks guarantied for the LB port.*/
+               REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+                      e3b0_val.lb_guarantied);
 
-               if (CHIP_IS_E3B0(bp)) {
-                       /*Should be done by init tool */
-                       /*
-                       * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
-                       * reset value
-                       * 944
-                       */
-
-                       /**
-                        * The hysteresis on the guarantied buffer space for the Lb port
-                        * before signaling XON.
-                        **/
-                       REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
-                       bnx2x_pfc_brb_get_e3b0_config_params(
-                           params,
-                           &e3b0_val,
-                           pfc_params->cos0_pauseable,
-                           pfc_params->cos1_pauseable);
-                       /**
-                        * The number of free blocks below which the full signal to the
-                        * LB port is asserted.
-                       */
-                       REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
-                                  e3b0_val.full_lb_xoff_th);
-                       /**
-                        * The number of free blocks above which the full signal to the
-                        * LB port is de-asserted.
-                       */
-                       REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
-                                  e3b0_val.full_lb_xon_threshold);
-                       /**
-                       * The number of blocks guarantied for the MAC #n port. n=0,1
-                       */
-
-                       /*The number of blocks guarantied for the LB port.*/
-                       REG_WR(bp, BRB1_REG_LB_GUARANTIED,
-                              e3b0_val.lb_guarantied);
-
-                       /**
-                        * The number of blocks guarantied for the MAC #n port.
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
-                                  2 * e3b0_val.mac_0_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
-                                  2 * e3b0_val.mac_1_class_t_guarantied);
-                       /**
-                        * The number of blocks guarantied for class #t in MAC0. t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
-                              e3b0_val.mac_0_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
-                              e3b0_val.mac_0_class_t_guarantied);
-                       /**
-                        * The hysteresis on the guarantied buffer space for class in
-                        * MAC0.  t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
-                              e3b0_val.mac_0_class_t_guarantied_hyst);
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
-                              e3b0_val.mac_0_class_t_guarantied_hyst);
-
-                       /**
-                        * The number of blocks guarantied for class #t in MAC1.t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
-                              e3b0_val.mac_1_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
-                              e3b0_val.mac_1_class_t_guarantied);
-                       /**
-                        * The hysteresis on the guarantied buffer space for class #t
-                       * in MAC1.  t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
-                              e3b0_val.mac_1_class_t_guarantied_hyst);
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
-                              e3b0_val.mac_1_class_t_guarantied_hyst);
-
-           }
+               /*
+                * The number of blocks guarantied for the MAC #n port.
+                */
+               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+                      2 * e3b0_val.mac_0_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+                      2 * e3b0_val.mac_1_class_t_guarantied);
+               /*
+                * The number of blocks guarantied for class #t in MAC0. t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+                      e3b0_val.mac_0_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+                      e3b0_val.mac_0_class_t_guarantied);
+               /*
+                * The hysteresis on the guarantied buffer space for class in
+                * MAC0.  t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+                      e3b0_val.mac_0_class_t_guarantied_hyst);
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+                      e3b0_val.mac_0_class_t_guarantied_hyst);
 
+               /*
+                * The number of blocks guarantied for class #t in MAC1.t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+                      e3b0_val.mac_1_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+                      e3b0_val.mac_1_class_t_guarantied);
+               /*
+                * The hysteresis on the guarantied buffer space for class #t
+                * in MAC1.  t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+                      e3b0_val.mac_1_class_t_guarantied_hyst);
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+                      e3b0_val.mac_1_class_t_guarantied_hyst);
        }
 
        return bnx2x_status;
@@ -2515,7 +2619,7 @@ int bnx2x_update_pfc(struct link_params *params,
 
        /* update BRB params */
        bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
-       if (0 != bnx2x_status)
+       if (bnx2x_status)
                return bnx2x_status;
 
        if (!vars->link_up)
@@ -2533,7 +2637,6 @@ int bnx2x_update_pfc(struct link_params *params,
                        bnx2x_emac_enable(params, vars, 0);
                        return bnx2x_status;
                }
-
                if (CHIP_IS_E2(bp))
                        bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
                else
@@ -3053,7 +3156,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "write phy register failed\n");
                netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
                rc = -EFAULT;
-
        } else {
                /* data */
                tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3192,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                               EMAC_MDIO_STATUS_10MB);
        return rc;
 }
-
-
 /******************************************************************/
 /*                     BSC access functions from E3              */
 /******************************************************************/
@@ -3339,7 +3439,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
                aer_val = 0x3800 + offset - 1;
        else
                aer_val = 0x3800 + offset;
-       DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                          MDIO_AER_BLOCK_AER_REG, aer_val);
 
@@ -3942,13 +4042,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
 
 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
                                           struct link_params *params,
-                                          u8 fiber_mode)
+                                          u8 fiber_mode,
+                                          u8 always_autoneg)
 {
        struct bnx2x *bp = params->bp;
        u16 val16, digctrl_kx1, digctrl_kx2;
-       u8 lane;
-
-       lane = bnx2x_get_warpcore_lane(phy, params);
 
        /* Clear XFI clock comp in non-10G single lane mode. */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4054,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 
-       if (phy->req_line_speed == SPEED_AUTO_NEG) {
+       if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
                /* SGMII Autoneg */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4065,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        } else {
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-               val16 &= 0xcfbf;
+               val16 &= 0xcebf;
                switch (phy->req_line_speed) {
                case SPEED_10:
                        break;
@@ -4043,9 +4141,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_DIGITAL5_MISC6, &val);
 }
-
-
-       /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
 static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
                                      struct link_params *params,
                                      u16 lane)
@@ -4250,7 +4346,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                vars->phy_flags |= PHY_SGMII_FLAG;
                DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
                bnx2x_warpcore_clear_regs(phy, params, lane);
-               bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+               bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
        } else {
                switch (serdes_net_if) {
                case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4374,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                }
                                bnx2x_warpcore_set_sgmii_speed(phy,
                                                                params,
-                                                               fiber_mode);
+                                                               fiber_mode,
+                                                               0);
                        }
 
                        break;
@@ -4291,7 +4388,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                bnx2x_warpcore_set_10G_XFI(phy, params, 0);
                        } else if (vars->line_speed == SPEED_1000) {
                                DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
-                               bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+                               bnx2x_warpcore_set_sgmii_speed(
+                                               phy, params, 1, 0);
                        }
                        /* Issue Module detection */
                        if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4526,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 
                /* Switch back to 4-copy registers */
                bnx2x_set_aer_mmd(params, phy);
-               /* Global loopback, not recommended. */
-               bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
-                               0x4000);
        } else {
                /* 10G & 20G */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4542,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 }
 
 
-void bnx2x_link_status_update(struct link_params *params,
-                             struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+                          struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u8 link_10g_plus;
-       u8 port = params->port;
-       u32 sync_offset, media_types;
-       /* Update PHY configuration */
-       set_phy_vars(params, vars);
-
-       vars->link_status = REG_RD(bp, params->shmem_base +
-                                  offsetof(struct shmem_region,
-                                           port_mb[port].link_status));
-
-       vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-       vars->phy_flags = PHY_XGXS_FLAG;
        if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
                vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+       vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
        if (vars->link_up) {
                DP(NETIF_MSG_LINK, "phy link up\n");
 
@@ -4563,7 +4644,23 @@ void bnx2x_link_status_update(struct link_params *params,
                if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
                        vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
        }
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+                             struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       u8 port = params->port;
+       u32 sync_offset, media_types;
+       /* Update PHY configuration */
+       set_phy_vars(params, vars);
 
+       vars->link_status = REG_RD(bp, params->shmem_base +
+                                  offsetof(struct shmem_region,
+                                           port_mb[port].link_status));
+
+       vars->phy_flags = PHY_XGXS_FLAG;
+       bnx2x_sync_link(params, vars);
        /* Sync media type */
        sync_offset = params->shmem_base +
                        offsetof(struct shmem_region,
@@ -4602,7 +4699,6 @@ void bnx2x_link_status_update(struct link_params *params,
                 vars->line_speed, vars->duplex, vars->flow_ctrl);
 }
 
-
 static void bnx2x_set_master_ln(struct link_params *params,
                                struct bnx2x_phy *phy)
 {
@@ -4676,11 +4772,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
         *  Each two bits represents a lane number:
         *  No swap is 0123 => 0x1b no need to enable the swap
         */
-       u16 ser_lane, rx_lane_swap, tx_lane_swap;
+       u16 rx_lane_swap, tx_lane_swap;
 
-       ser_lane = ((params->lane_config &
-                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
        rx_lane_swap = ((params->lane_config &
                         PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
                        PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5449,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
                                      struct link_params *params,
                                      struct link_vars *vars)
 {
-
        struct bnx2x *bp = params->bp;
 
        u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5495,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
                                     struct link_params *params,
                                     struct link_vars *vars)
 {
-
        struct bnx2x *bp = params->bp;
-
        u8 lane;
        u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
        int rc = 0;
@@ -6678,7 +6768,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        return rc;
 }
 
-
 /*****************************************************************************/
 /*                         External Phy section                             */
 /*****************************************************************************/
@@ -8103,7 +8192,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
+       struct bnx2x *bp = params->bp;
        bnx2x_warpcore_power_module(params, phy, 0);
+       /* Put Warpcore in low power mode */
+       REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+       /* Put LCPLL in low power mode */
+       REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+       REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+       REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
 }
 
 static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9137,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK,
                           "8727 Power fault has been detected on port %d\n",
                           oc_port);
-                       netdev_err(bp->dev, "Error:  Power fault on Port %d has"
-                                           " been detected and the power to "
-                                           "that SFP+ module has been removed"
-                                           " to prevent failure of the card."
-                                           " Please remove the SFP+ module and"
-                                           " restart the system to clear this"
-                                           " error.\n",
+                       netdev_err(bp->dev, "Error: Power fault on Port %d has "
+                                           "been detected and the power to "
+                                           "that SFP+ module has been removed "
+                                           "to prevent failure of the card. "
+                                           "Please remove the SFP+ module and "
+                                           "restart the system to clear this "
+                                           "error.\n",
                         oc_port);
                        /* Disable all RX_ALARMs except for mod_abs */
                        bnx2x_cl45_write(bp, phy,
@@ -9228,7 +9325,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
-       u16 val;
+       u16 val, offset;
 
        /* PHYC_CTL_LED_CTL */
        bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9360,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                        MDIO_PMA_REG_8481_LED3_BLINK,
                        0);
 
-       bnx2x_cl45_read(bp, phy,
+       /* Configure the blink rate to ~15.9 Hz */
+       bnx2x_cl45_write(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
-       val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+                       MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+                       MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+       else
+               offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
 
+       bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, offset, &val);
+       val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
        bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+                        MDIO_PMA_DEVAD, offset, val);
 
        /* 'Interrupt Mask' */
        bnx2x_cl45_write(bp, phy,
@@ -9283,7 +9388,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                       struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 autoneg_val, an_1000_val, an_10_100_val;
+       u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
        u16 tmp_req_line_speed;
 
        tmp_req_line_speed = phy->req_line_speed;
@@ -9378,6 +9483,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
                                 (1<<15 | 1<<9 | 7<<0));
+               /* The PHY needs this set even for forced link. */
+               an_10_100_val |= (1<<8) | (1<<7);
                DP(NETIF_MSG_LINK, "Setting 100M force\n");
        }
        if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,9 +9522,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK, "Advertising 10G\n");
                        /* Restart autoneg for 10G*/
 
+                       bnx2x_cl45_read(bp, phy,
+                                       MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+                                       &an_10g_val);
+                       bnx2x_cl45_write(bp, phy,
+                                        MDIO_AN_DEVAD,
+                                        MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+                                        an_10g_val | 0x1000);
                        bnx2x_cl45_write(bp, phy,
-                                MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
-                                0x3200);
+                                        MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+                                        0x3200);
        } else
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD,
@@ -9449,74 +9564,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                                   struct link_params *params,
-                                  struct link_vars *vars)
+                  u16 fw_cmd,
+                  u16 cmd_args[])
 {
        u32 idx;
-       u32 pair_swap;
        u16 val;
-       u16 data;
        struct bnx2x *bp = params->bp;
-       /* Do pair swap */
-
-       /* Check for configuration. */
-       pair_swap = REG_RD(bp, params->shmem_base +
-                          offsetof(struct shmem_region,
-                       dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
-               PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
-       if (pair_swap == 0)
-               return 0;
-
-       data = (u16)pair_swap;
-
        /* Write CMD_OPEN_OVERRIDE to STATUS reg */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                       PHY84833_CMD_OPEN_OVERRIDE);
-       for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+                       MDIO_84833_CMD_HDLR_STATUS,
+                       PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-               if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+                               MDIO_84833_CMD_HDLR_STATUS, &val);
+               if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
                        break;
                msleep(1);
        }
-       if (idx >= PHY84833_HDSHK_WAIT) {
-               DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+       if (idx >= PHY84833_CMDHDLR_WAIT) {
+               DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
                return -EINVAL;
        }
 
+       /* Prepare argument(s) and issue command */
+       for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               cmd_args[idx]);
+       }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG4,
-                       data);
-       /* Issue pair swap command */
-       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG0,
-                       PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
-       for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+                       MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-               if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-                       (val == PHY84833_CMD_COMPLETE_ERROR))
+                               MDIO_84833_CMD_HDLR_STATUS, &val);
+               if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+                       (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
                        break;
                msleep(1);
        }
-       if ((idx >= PHY84833_HDSHK_WAIT) ||
-               (val == PHY84833_CMD_COMPLETE_ERROR)) {
-               DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+       if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+               (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+               DP(NETIF_MSG_LINK, "FW cmd failed.\n");
                return -EINVAL;
        }
+       /* Gather returning data */
+       for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               &cmd_args[idx]);
+       }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                       PHY84833_CMD_CLEAR_COMPLETE);
-       DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+                       MDIO_84833_CMD_HDLR_STATUS,
+                       PHY84833_STATUS_CMD_CLEAR_COMPLETE);
        return 0;
 }
 
 
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+                                  struct link_params *params,
+                                  struct link_vars *vars)
+{
+       u32 pair_swap;
+       u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+       int status;
+       struct bnx2x *bp = params->bp;
+
+       /* Check for configuration. */
+       pair_swap = REG_RD(bp, params->shmem_base +
+                          offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+               PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+       if (pair_swap == 0)
+               return 0;
+
+       /* Only the second argument is used for this command */
+       data[1] = (u16)pair_swap;
+
+       status = bnx2x_84833_cmd_hdlr(phy, params,
+               PHY84833_CMD_SET_PAIR_SWAP, data);
+       if (status == 0)
+               DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+       return status;
+}
+
 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
                                      u32 shmem_base_path[],
                                      u32 chip_id)
@@ -9579,24 +9715,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
        return 0;
 }
 
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
-                                               u32 shmem_base_path[],
-                                               u32 chip_id)
-{
-       u8 reset_gpios;
-
-       reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
-       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
-       udelay(10);
-       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
-       msleep(800);
-       DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
-               reset_gpios);
-
-       return 0;
-}
-
 #define PHY84833_CONSTANT_LATENCY 1193
 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                                   struct link_params *params,
@@ -9605,8 +9723,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u8 port, initialize = 1;
        u16 val;
-       u16 temp;
-       u32 actual_phy_selection, cms_enable, idx;
+       u32 actual_phy_selection, cms_enable;
+       u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
        int rc = 0;
 
        msleep(1);
@@ -9625,6 +9743,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_CTRL, 0x8000);
+       }
+
+       bnx2x_wait_reset_complete(bp, phy, params);
+
+       /* Wait for GPHY to come out of reset */
+       msleep(50);
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                /* Bring PHY out of super isolate mode */
                bnx2x_cl45_read(bp, phy,
                                MDIO_CTL_DEVAD,
@@ -9633,26 +9758,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                MDIO_CTL_DEVAD,
                                MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-       }
-
-       bnx2x_wait_reset_complete(bp, phy, params);
-
-       /* Wait for GPHY to come out of reset */
-       msleep(50);
-
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
                bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
-       /*
-        * BCM84823 requires that XGXS links up first @ 10G for normal behavior
-        */
-       temp = vars->line_speed;
-       vars->line_speed = SPEED_10000;
-       bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
-       bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
-       vars->line_speed = temp;
-
-       /* Set dual-media configuration according to configuration */
+       } else {
+               /*
+                * BCM84823 requires that XGXS links up first @ 10G for normal
+                * behavior.
+                */
+               u16 temp;
+               temp = vars->line_speed;
+               vars->line_speed = SPEED_10000;
+               bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+               bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+               vars->line_speed = temp;
+       }
 
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                        MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9700,64 +9818,18 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* AutogrEEEn */
        if (params->feature_config_flags &
-               FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
-               /* Ensure that f/w is ready */
-               for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-                       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                                       MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-                       if (val == PHY84833_CMD_OPEN_FOR_CMDS)
-                               break;
-                       usleep_range(1000, 1000);
-               }
-               if (idx >= PHY84833_HDSHK_WAIT) {
-                       DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
-                       return -EINVAL;
-               }
-
-               /* Select EEE mode */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG3,
-                               0x2);
-
-               /* Set Idle and Latency */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG4,
-                               PHY84833_CONSTANT_LATENCY + 1);
-
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_DATA3_REG,
-                               PHY84833_CONSTANT_LATENCY + 1);
-
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_DATA4_REG,
-                               PHY84833_CONSTANT_LATENCY);
-
-               /* Send EEE instruction to command register */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG0,
-                               PHY84833_DIAG_CMD_SET_EEE_MODE);
-
-               /* Ensure that the command has completed */
-               for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-                       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                                       MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-                       if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-                               (val == PHY84833_CMD_COMPLETE_ERROR))
-                               break;
-                       usleep_range(1000, 1000);
-               }
-               if ((idx >= PHY84833_HDSHK_WAIT) ||
-                       (val == PHY84833_CMD_COMPLETE_ERROR)) {
-                       DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
-                       return -EINVAL;
-               }
-
-               /* Reset command handler */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                           MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                           PHY84833_CMD_CLEAR_COMPLETE);
-       }
+               FEATURE_CONFIG_AUTOGREEEN_ENABLED)
+               cmd_args[0] = 0x2;
+       else
+               cmd_args[0] = 0x0;
 
+       cmd_args[1] = 0x0;
+       cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+       cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+       rc = bnx2x_84833_cmd_hdlr(phy, params,
+               PHY84833_CMD_SET_EEE_MODE, cmd_args);
+       if (rc != 0)
+               DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
        if (initialize)
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
@@ -10144,8 +10216,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "54618SE cfg init\n");
        usleep_range(1000, 1000);
 
-       /* This works with E3 only, no need to check the chip
-          before determining the port. */
+       /*
+        * This works with E3 only, no need to check the chip
+        * before determining the port.
+        */
        port = params->port;
 
        cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -10327,6 +10401,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        return 0;
 }
 
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+                                      struct link_params *params, u8 mode)
+{
+       struct bnx2x *bp = params->bp;
+       u16 temp;
+
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_LED_SEL1);
+       bnx2x_cl22_read(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               &temp);
+       temp &= 0xff00;
+
+       DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+       switch (mode) {
+       case LED_MODE_FRONT_PANEL_OFF:
+       case LED_MODE_OFF:
+               temp |= 0x00ee;
+               break;
+       case LED_MODE_OPER:
+               temp |= 0x0001;
+               break;
+       case LED_MODE_ON:
+               temp |= 0x00ff;
+               break;
+       default:
+               break;
+       }
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+       return;
+}
+
+
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -11103,7 +11214,7 @@ static struct bnx2x_phy phy_54618se = {
        .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
        .format_fw_ver  = (format_fw_ver_t)NULL,
        .hw_reset       = (hw_reset_t)NULL,
-       .set_link_led   = (set_link_led_t)NULL,
+       .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/
@@ -11181,7 +11292,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                                       offsetof(struct shmem_region,
                        dev_info.port_feature_config[port].link_config)) &
                          PORT_FEATURE_CONNECTED_SWITCH_MASK);
-       chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+       chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+               ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
        DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
        if (USES_WARPCORE(bp)) {
                u32 serdes_net_if;
@@ -11360,6 +11473,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
                return -EINVAL;
        default:
                *phy = phy_null;
+               /* In case external PHY wasn't found */
+               if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+                   (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+                       return -EINVAL;
                return 0;
        }
 
@@ -11533,7 +11650,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
 
 int bnx2x_phy_probe(struct link_params *params)
 {
-       u8 phy_index, actual_phy_idx, link_cfg_idx;
+       u8 phy_index, actual_phy_idx;
        u32 phy_config_swapped, sync_offset, media_types;
        struct bnx2x *bp = params->bp;
        struct bnx2x_phy *phy;
@@ -11544,7 +11661,6 @@ int bnx2x_phy_probe(struct link_params *params)
 
        for (phy_index = INT_PHY; phy_index < MAX_PHYS;
              phy_index++) {
-               link_cfg_idx = LINK_CONFIG_IDX(phy_index);
                actual_phy_idx = phy_index;
                if (phy_config_swapped) {
                        if (phy_index == EXT_PHY1)
@@ -12210,6 +12326,63 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+                                               u32 shmem_base_path[],
+                                               u32 shmem2_base_path[],
+                                               u8 phy_index,
+                                               u32 chip_id)
+{
+       u8 reset_gpios;
+       struct bnx2x_phy phy;
+       u32 shmem_base, shmem2_base, cnt;
+       s8 port = 0;
+       u16 val;
+
+       reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+       udelay(10);
+       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+       DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+               reset_gpios);
+       for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+               /* This PHY is for E2 and E3. */
+               shmem_base = shmem_base_path[port];
+               shmem2_base = shmem2_base_path[port];
+               /* Extract the ext phy address for the port */
+               if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+                                      0, &phy) !=
+                   0) {
+                       DP(NETIF_MSG_LINK, "populate_phy failed\n");
+                       return -EINVAL;
+               }
+
+               /* Wait for FW completing its initialization. */
+               for (cnt = 0; cnt < 1000; cnt++) {
+                       bnx2x_cl45_read(bp, &phy,
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_CTRL, &val);
+                       if (!(val & (1<<15)))
+                               break;
+                       msleep(1);
+               }
+               if (cnt >= 1000)
+                       DP(NETIF_MSG_LINK,
+                               "84833 Cmn reset timeout (%d)\n", port);
+
+               /* Put the port in super isolate mode. */
+               bnx2x_cl45_read(bp, &phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+               val |= MDIO_84833_SUPER_ISOLATE;
+               bnx2x_cl45_write(bp, &phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+       }
+
+       return 0;
+}
+
+
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
                                     u32 ext_phy_type, u32 chip_id)
@@ -12244,7 +12417,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                 * GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
-               rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+               rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+                                               shmem2_base_path,
+                                               phy_index, chip_id);
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
                rc = -EINVAL;
index 2a46e633abe9c7992566c2e0fe0b89fa32816d8c..e02a68a7fb85f112953384bb96154547f8a8c2d5 100644 (file)
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
 /*  Configure the COS to ETS according to BW and SP settings.*/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
                         const struct link_vars *vars,
-                        const struct bnx2x_ets_params *ets_params);
+                        struct bnx2x_ets_params *ets_params);
 /* Read pfc statistic*/
 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
                                                 u32 pfc_frames_sent[2],
index 6486ab8c8fc87d803a6e5a3d507d50796704db08..ffeaaa95ed96eb983cfd60d5c5c2395a92b3b952 100644 (file)
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-       return 2 * vn + BP_PORT(bp);
-}
-
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
        struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
           "rate shaping and fairness are disabled\n");
 }
 
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
-       int func;
-       int vn;
-
-       /* Set the attention towards other drivers on the same port */
-       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-               if (vn == BP_VN(bp))
-                       continue;
-
-               func = func_by_vn(bp, vn);
-               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-                      (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-       }
-}
-
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
        if (bp->state != BNX2X_STATE_OPEN)
                return;
 
+       /* read updated dcb configuration */
+       bnx2x_dcbx_pmf_update(bp);
+
        bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
        if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
-       /* Statistics are not supported for CNIC Clients at the moment */
-       if (IS_FCOE_FP(fp))
-               return false;
-#endif
-       return true;
-}
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
@@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
         *  parent connection). The statistics are zeroed when the parent
         *  connection is initialized.
         */
-       if (stat_counter_valid(bp, fp)) {
-               __set_bit(BNX2X_Q_FLG_STATS, &flags);
-               if (zero_stats)
-                       __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-       }
+
+       __set_bit(BNX2X_Q_FLG_STATS, &flags);
+       if (zero_stats)
+               __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
        return flags;
 }
@@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        /* This should be a maximum number of data bytes that may be
         * placed on the BD (not including paddings).
         */
-       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
-               IP_HEADER_ALIGNMENT_PADDING;
+       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+               BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
         */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+       struct eth_stats_info *ether_stat =
+               &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+       /* leave last char as NULL */
+       memcpy(ether_stat->version, DRV_MODULE_VERSION,
+              ETH_STAT_INFO_VERSION_LEN - 1);
+
+       bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+                                        DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                                        ether_stat->mac_local);
+
+       ether_stat->mtu_size = bp->dev->mtu;
+
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+       if (bp->dev->features & NETIF_F_TSO)
+               ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+       ether_stat->feature_flags |= bp->common.boot_mode;
+
+       ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+       ether_stat->txq_size = bp->tx_ring_size;
+       ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct fcoe_stats_info *fcoe_stat =
+               &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+       memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+       fcoe_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+       /* insert FCoE stats from ramrod response */
+       if (!NO_FCOE(bp)) {
+               struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       tstorm_queue_statistics;
+
+               struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       xstorm_queue_statistics;
+
+               struct fcoe_statistics_params *fw_fcoe_stat =
+                       &bp->fw_stats_data->fcoe;
+
+               ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->mcast_pkts_sent);
+       }
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct iscsi_stats_info *iscsi_stat =
+               &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+       memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+       iscsi_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *     reread new bandwidth configuration
  *     configure FW
@@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+       enum drv_info_opcode op_code;
+       u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+       /* if drv_info version supported by MFW doesn't match - send NACK */
+       if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+                 DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+
+       switch (op_code) {
+       case ETH_STATS_OPCODE:
+               bnx2x_drv_info_ether_stat(bp);
+               break;
+       case FCOE_STATS_OPCODE:
+               bnx2x_drv_info_fcoe_stat(bp);
+               break;
+       case ISCSI_STATS_OPCODE:
+               bnx2x_drv_info_iscsi_stat(bp);
+               break;
+       default:
+               /* if op code isn't supported - send NACK */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       /* if we got drv_info attn from MFW then these fields are defined in
+        * shmem2 for sure
+        */
+       SHMEM2_WR(bp, drv_info_host_addr_lo,
+               U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+       SHMEM2_WR(bp, drv_info_host_addr_hi,
+               U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+       bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
        netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
               " the driver to shutdown the card to prevent permanent"
               " damage.  Please contact OEM Support for assistance\n");
+
+       /*
+        * Scheudle device reset (unload)
+        * This is due to some boards consuming sufficient power when driver is
+        * up to overheat if fan fails.
+        */
+       smp_mb__before_clear_bit();
+       set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+       smp_mb__after_clear_bit();
+       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        if (val & DRV_STATUS_SET_MF_BW)
                                bnx2x_set_mf_bw(bp);
 
+                       if (val & DRV_STATUS_DRV_INFO_REQ)
+                               bnx2x_handle_drv_info_req(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
 
@@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
        u8 cos;
        unsigned long q_type = 0;
        u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+       fp->rx_queue = fp_idx;
        fp->cid = fp_idx;
        fp->cl_id = bnx2x_fp_cl_id(fp);
        fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
 static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
        int num_groups;
+       int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-       /* number of eth_queues */
-       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+       /* number of queues for statistics is number of eth queues + FCoE */
+       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
        /* Total number of FW statistics requests =
-        * 1 for port stats + 1 for PF stats + num_eth_queues */
-       bp->fw_stats_num = 2 + num_queue_stats;
+        * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+        * num of queues
+        */
+       bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
        /* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         * STATS_QUERY_CMD_COUNT rules. The real number or requests is
         * configured in the stats_query_header.
         */
-       num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-               (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+       num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+                    (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
                        num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         *
         * stats_counter holds per-STORM counters that are incremented
         * when STORM has finished with the current request.
+        *
+        * memory for FCoE offloaded statistics are counted anyway,
+        * even if they will not be sent.
         */
        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
                sizeof(struct per_pf_stats) +
+               sizeof(struct fcoe_statistics_params) +
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
 
@@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
        unsigned long ramrod_flags = 0;
 
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+               DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+               return 0;
+       }
+#endif
+
        DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@ sp_rtnl_not_reset:
        if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
                bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 
+       /*
+        * in case of fan failure we need to reset id if the "stop on error"
+        * debug flag is set, since we trying to prevent permanent overheating
+        * damage
+        */
+       if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+               DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+               netif_device_detach(bp->dev);
+               bnx2x_close(bp->dev);
+       }
+
 sp_rtnl_exit:
        rtnl_unlock();
 }
@@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       u32 val, val2, val3, val4, id;
+       u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
 
        /* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+                       BC_SUPPORTS_PFC_STATS : 0;
+
+       boot_mode = SHMEM_RD(bp,
+                       dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+                       PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+       switch (boot_mode) {
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+               break;
+       }
 
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                                                        bp->common.shmem2_base);
 }
 
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
-       int func = BP_ABS_FUNC(bp);
 
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_iscsi_conn);
-       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-                               drv_lic_key[port].max_fcoe_conn);
 
-       /* Get the number of maximum allowed iSCSI and FCoE connections */
+       /* Get the number of maximum allowed iSCSI connections */
        bp->cnic_eth_dev.max_iscsi_conn =
                (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
                BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 
+       BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+                      bp->cnic_eth_dev.max_iscsi_conn);
+
+       /*
+        * If maximum allowed number of connections is zero -
+        * disable the feature.
+        */
+       if (!bp->cnic_eth_dev.max_iscsi_conn)
+               bp->flags |= NO_ISCSI_FLAG;
+#else
+       bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       int port = BP_PORT(bp);
+       int func = BP_ABS_FUNC(bp);
+
+       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[port].max_fcoe_conn);
+
+       /* Get the number of maximum allowed FCoE connections */
        bp->cnic_eth_dev.max_fcoe_conn =
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
                }
        }
 
-       BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-                      bp->cnic_eth_dev.max_iscsi_conn,
-                      bp->cnic_eth_dev.max_fcoe_conn);
+       BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
        /*
         * If maximum allowed number of connections is zero -
         * disable the feature.
         */
-       if (!bp->cnic_eth_dev.max_iscsi_conn)
-               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
-}
+#else
+       bp->flags |= NO_FCOE_FLAG;
 #endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+       /*
+        * iSCSI may be dynamically disabled but reading
+        * info here we will decrease memory usage by driver
+        * if the feature is disabled for good
+        */
+       bnx2x_get_iscsi_info(bp);
+       bnx2x_get_fcoe_info(bp);
+}
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+               /*
+                * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
                 * FCoE MAC then the appropriate feature should be disabled.
                 */
                if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    fcoe_mac_addr_lower);
                                bnx2x_set_mac_buf(fip_mac, val, val2);
-                               BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+                               BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
                                               fip_mac);
 
                        } else
                                bp->flags |= NO_FCOE_FLAG;
+               } else { /* SD mode */
+                       if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+                               /* use primary mac as iscsi mac */
+                               memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+                               /* Zero primary MAC configuration */
+                               memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+                               BNX2X_DEV_INFO("SD ISCSI MODE\n");
+                               BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+                                              iscsi_mac);
+                       }
                }
 #endif
        } else {
@@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        }
 #endif
 
-       if (!is_valid_ether_addr(bp->dev->dev_addr))
+       if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
                        "bad Ethernet MAC address configuration: "
                        "%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
        bnx2x_get_cnic_info(bp);
-#endif
 
        /* Get current FW pulse sequence */
        if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
        int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
+       char vpd_start[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
+       char *vpd_data;
+       char *vpd_extended_data = NULL;
        u8 len;
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+       /* VPD RO tag should be first tag after identifier string, hence
+        * we should be able to find it in first BNX2X_VPD_LEN chars
+        */
+       i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;
 
-
        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+                   pci_vpd_lrdt_size(&vpd_start[i]);
 
        i += PCI_VPD_LRDT_TAG_SIZE;
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+       if (block_end > BNX2X_VPD_LEN) {
+               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               if (vpd_extended_data  == NULL)
+                       goto out_not_found;
+
+               /* read rest of vpd image into vpd_extended_data */
+               memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+               cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+                                  block_end - BNX2X_VPD_LEN,
+                                  vpd_extended_data + BNX2X_VPD_LEN);
+               if (cnt < (block_end - BNX2X_VPD_LEN))
+                       goto out_not_found;
+               vpd_data = vpd_extended_data;
+       } else
+               vpd_data = vpd_start;
+
+       /* now vpd_data holds full vpd content in both cases */
 
        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
                                bp->fw_ver[len] = ' ';
                        }
                }
+               kfree(vpd_extended_data);
                return;
        }
 out_not_found:
+       kfree(vpd_extended_data);
        return;
 }
 
@@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        bp->multi_mode = multi_mode;
 
+       bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+       bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
        /* Set TPA flags */
-       if (disable_tpa) {
+       if (bp->disable_tpa) {
                bp->flags &= ~TPA_ENABLE_FLAG;
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
                bp->flags |= TPA_ENABLE_FLAG;
                bp->dev->features |= NETIF_F_LRO;
        }
-       bp->disable_tpa = disable_tpa;
 
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev)
 }
 
 /* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
        }
 
        bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+       /* handle ISCSI SD mode */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
 
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev)
 }
 #endif
 
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+               return -EADDRNOTAVAIL;
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_select_queue       = bnx2x_select_queue,
        .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_validate_addr      = bnx2x_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_fix_features       = bnx2x_fix_features,
@@ -10548,33 +10838,38 @@ do {                                                                  \
 
 int bnx2x_init_firmware(struct bnx2x *bp)
 {
-       const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;
 
-       if (CHIP_IS_E1(bp))
-               fw_file_name = FW_FILE_NAME_E1;
-       else if (CHIP_IS_E1H(bp))
-               fw_file_name = FW_FILE_NAME_E1H;
-       else if (!CHIP_IS_E1x(bp))
-               fw_file_name = FW_FILE_NAME_E2;
-       else {
-               BNX2X_ERR("Unsupported chip revision\n");
-               return -EINVAL;
-       }
 
-       BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+       if (!bp->firmware) {
+               const char *fw_file_name;
 
-       rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
-       if (rc) {
-               BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
-               goto request_firmware_exit;
-       }
+               if (CHIP_IS_E1(bp))
+                       fw_file_name = FW_FILE_NAME_E1;
+               else if (CHIP_IS_E1H(bp))
+                       fw_file_name = FW_FILE_NAME_E1H;
+               else if (!CHIP_IS_E1x(bp))
+                       fw_file_name = FW_FILE_NAME_E2;
+               else {
+                       BNX2X_ERR("Unsupported chip revision\n");
+                       return -EINVAL;
+               }
+               BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
 
-       rc = bnx2x_check_firmware(bp);
-       if (rc) {
-               BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
-               goto request_firmware_exit;
+               rc = request_firmware(&bp->firmware, fw_file_name,
+                                     &bp->pdev->dev);
+               if (rc) {
+                       BNX2X_ERR("Can't load firmware file %s\n",
+                                 fw_file_name);
+                       goto request_firmware_exit;
+               }
+
+               rc = bnx2x_check_firmware(bp);
+               if (rc) {
+                       BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+                       goto request_firmware_exit;
+               }
        }
 
        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10630,6 +10925,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);
+       bp->firmware = NULL;
 }
 
 
@@ -10817,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
 #ifdef BCM_CNIC
-       /* disable FCOE L2 queue for E1x and E3*/
-       if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+       /* disable FCOE L2 queue for E1x */
+       if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
 
 #endif
@@ -10925,6 +11221,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
        if (bp->doorbells)
                iounmap(bp->doorbells);
 
+       bnx2x_release_firmware(bp);
+
        bnx2x_free_mem_bp(bp);
 
        free_netdev(dev);
@@ -11478,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                smp_mb__after_atomic_inc();
                break;
        }
+       case DRV_CTL_ULP_REGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
+       case DRV_CTL_ULP_UNREGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
 
        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11553,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 
        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
index fc7bd0f23c0b7e656905eb590ea6d8c0d360ea44..44609de4e5dc59f7ecbe41d3ea8c0e01f17d837e 100644 (file)
 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1                         0x6007c
 /* [RW 10] Write client 0: Assert pause threshold. */
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_0                          0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1                          0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE                        0x60268
+/* [R 24] The number of full blocks occpied by port. */
 #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0                          0x60094
 /* [RW 1] Reset the design by software. */
 #define BRB1_REG_SOFT_RESET                                     0x600dc
    register bits. */
 #define MISC_REG_LCPLL_CTRL_1                                   0xa2a4
 #define MISC_REG_LCPLL_CTRL_REG_2                               0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN                               0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA                           0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG                           0xaa7c
 /* [RW 4] Interrupt mask register #0 read/write */
 #define MISC_REG_MISC_INT_MASK                                  0xa388
 /* [RW 1] Parity mask register #0 read/write */
  * is compared to the value on ctrl_md_devad. Drives output
  * misc_xgxs0_phy_addr. Global register. */
 #define MISC_REG_WC0_CTRL_PHY_ADDR                              0xa9cc
+#define MISC_REG_WC0_RESET                                      0xac30
 /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
    side. This should be less than or equal to phy_port_mode; if some of the
    ports are not used. This enables reduction of frequency on the core side.
@@ -6823,11 +6835,13 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER       0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER                0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G                      0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG               0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS               0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1               0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN             0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG                       0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS                       0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH               0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ       0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1                       0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1                       0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN                     0x0080
 
 /* BCM84833 only */
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
@@ -6838,26 +6852,35 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_84833_TOP_CFG_SCRATCH_REG2                        0x4007
 #define MDIO_84833_TOP_CFG_SCRATCH_REG3                        0x4008
 #define MDIO_84833_TOP_CFG_SCRATCH_REG4                        0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG                   0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG                   0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26               0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27               0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28               0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29               0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30               0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31               0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND    MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS     MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1      MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2      MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3      MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4      MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5      MDIO_84833_TOP_CFG_SCRATCH_REG31
 
 /* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE             0x2
+#define PHY84833_CMD_SET_PAIR_SWAP                     0x8001
+#define PHY84833_CMD_GET_EEE_MODE                      0x8008
+#define PHY84833_CMD_SET_EEE_MODE                      0x8009
 /* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED                          0x0001
-#define PHY84833_CMD_IN_PROGRESS                       0x0002
-#define PHY84833_CMD_COMPLETE_PASS                     0x0004
-#define PHY84833_CMD_COMPLETE_ERROR                    0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS                     0x0010
-#define PHY84833_CMD_SYSTEM_BOOT                       0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS                 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE                    0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE                     0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED                   0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS                        0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS              0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR             0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS              0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT                        0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS          0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE             0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE              0xa5a5
 
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE                 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE                 0x28
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD                                  0x3
@@ -6990,6 +7013,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_REG_INTR_MASK                             0x1b
 #define MDIO_REG_INTR_MASK_LINK_STATUS                 (0x1 << 1)
 #define MDIO_REG_GPHY_SHADOW                           0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1                  (0x0d << 10)
 #define MDIO_REG_GPHY_SHADOW_LED_SEL2                  (0x0e << 10)
 #define MDIO_REG_GPHY_SHADOW_WR_ENA                    (0x1 << 15)
 #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED              (0x1e << 10)
index 0440425c83d6820e3ba2ad12ec57430ec9896492..5ac616093f9f7d4c4dfd5696dd966ff614091c45 100644 (file)
@@ -30,6 +30,8 @@
 
 #define BNX2X_MAX_EMUL_MULTI           16
 
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
 /**** Exe Queue interfaces ****/
 
 /**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
        return true;
 }
 
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                               int n, u8 *buf)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+       u8 *next = buf;
+       int counter = 0;
+
+       /* traverse list */
+       list_for_each_entry(pos, &o->head, link) {
+               if (counter < n) {
+                       /* place leading zeroes in buffer */
+                       memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+                       /* place mac after leading zeroes*/
+                       memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+                              ETH_ALEN);
+
+                       /* calculate address of next element and
+                        * advance counter
+                        */
+                       counter++;
+                       next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+                       DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+                          counter, next, pos->u.mac.mac);
+               }
+       }
+       return counter * ETH_ALEN;
+}
+
 /* check_add() callbacks */
 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
                mac_obj->check_move        = bnx2x_check_move;
                mac_obj->ramrod_cmd        =
                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+               mac_obj->get_n_elements    = bnx2x_get_n_elements;
 
                /* Exe Queue */
                bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
                if (!list_empty(&o->registry.exact_match.macs))
                        return 0;
 
-               elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+               elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
                if (!elem) {
                        BNX2X_ERR("Failed to allocate registry memory\n");
                        return -ENOMEM;
@@ -5380,7 +5413,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
        rc = drv->init_fw(bp);
        if (rc) {
                BNX2X_ERR("Error loading firmware\n");
-               goto fw_init_err;
+               goto init_err;
        }
 
        /* Handle the beginning of COMMON_XXX pases separatelly... */
@@ -5388,25 +5421,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
                rc = bnx2x_func_init_cmn_chip(bp, drv);
                if (rc)
-                       goto init_hw_err;
+                       goto init_err;
 
                break;
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                rc = bnx2x_func_init_cmn(bp, drv);
                if (rc)
-                       goto init_hw_err;
+                       goto init_err;
 
                break;
        case FW_MSG_CODE_DRV_LOAD_PORT:
                rc = bnx2x_func_init_port(bp, drv);
                if (rc)
-                       goto init_hw_err;
+                       goto init_err;
 
                break;
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                rc = bnx2x_func_init_func(bp, drv);
                if (rc)
-                       goto init_hw_err;
+                       goto init_err;
 
                break;
        default:
@@ -5414,10 +5447,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
                rc = -EINVAL;
        }
 
-init_hw_err:
-       drv->release_fw(bp);
-
-fw_init_err:
+init_err:
        drv->gunzip_end(bp);
 
        /* In case of success, complete the comand immediatelly: no ramrods
index 9a517c2e9f1b66c6be01d286c21a6b978cfc684d..992308ff82e845b8698f1647a258b8bdc968aa7e 100644 (file)
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
        /* RAMROD command to be used */
        int                             ramrod_cmd;
 
+       /* copy first n elements onto preallocated buffer
+        *
+        * @param n number of elements to get
+        * @param buf buffer preallocated by caller into which elements
+        *            will be copied. Note elements are 4-byte aligned
+        *            so buffer size must be able to accomodate the
+        *            aligned elements.
+        *
+        * @return number of copied bytes
+        */
+       int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                             int n, u8 *buf);
+
        /**
         * Checks if ADD-ramrod with the given params may be performed.
         *
index 02ac6a771bf99eaa254ee9c66e199dfbe70552df..bc0121ac291e249f0f21876dc01127fc46c256d4 100644 (file)
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
 #endif
 }
 
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+       u16 res = sizeof(struct host_port_stats) >> 2;
+
+       /* if PFC stats are not supported by the MFW, don't DMA them */
+       if (!(bp->flags &  BC_SUPPORTS_PFC_STATS))
+               res -= (sizeof(u32)*4) >> 2;
+
+       return res;
+}
+
 /*
  * Init service functions
  */
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
                                   DMAE_LEN32_RD_MAX * 4);
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
                                   DMAE_LEN32_RD_MAX * 4);
-       dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                UPDATE_STAT64(tx_stat_gterr,
                                tx_stat_dot3statsinternalmactransmiterrors);
                UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+               /* collect PFC stats */
+               DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+                       pstats->pfc_frames_tx_hi,
+                       diff.lo, new->tx_stat_gtpp_lo,
+                       pstats->pfc_frames_tx_lo);
+               pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+               pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+               ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+                       pstats->pfc_frames_tx_lo, diff.lo);
+
+               DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+                       pstats->pfc_frames_rx_hi,
+                       diff.lo, new->rx_stat_grpp_lo,
+                       pstats->pfc_frames_rx_lo);
+               pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+               pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+               ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+                       pstats->pfc_frames_rx_lo, diff.lo);
        }
 
        estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
 
+       /* collect pfc stats */
+       ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+               pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+       ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+               pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
 
        ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
        ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
        estats->brb_drop_hi = pstats->brb_drop_hi;
        estats->brb_drop_lo = pstats->brb_drop_lo;
 
-       pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+       pstats->host_port_stats_counter++;
 
        if (!BP_NOMCP(bp)) {
                u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                if (bp->func_stx) {
                        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                        dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        enum bnx2x_stats_state state;
        if (unlikely(bp->panic))
                return;
-       bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        spin_unlock_bh(&bp->stats_lock);
 
+       bnx2x_stats_stm[state][event].action(bp);
+
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
@@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
        dmae->dst_addr_lo = bp->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
-       dmae->len = sizeof(struct host_port_stats) >> 2;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp);
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
 static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
        int i;
+       int first_queue_query_index;
        struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
 
        dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
        cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 
+       /**** FCoE FW statistics data ****/
+       if (!NO_FCOE(bp)) {
+               cur_data_offset = bp->fw_stats_data_mapping +
+                       offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+               cur_query_entry =
+                       &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+               cur_query_entry->kind = STATS_TYPE_FCOE;
+               /* For FCoE query index is a DONT CARE */
+               cur_query_entry->index = BP_PORT(bp);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
+
        /**** Clients' queries ****/
        cur_data_offset = bp->fw_stats_data_mapping +
                offsetof(struct bnx2x_fw_stats_data, queue_stats);
 
+       /* first queue query index depends whether FCoE offloaded request will
+        * be included in the ramrod
+        */
+       if (!NO_FCOE(bp))
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+       else
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
        for_each_eth_queue(bp, i) {
                cur_query_entry =
                        &bp->fw_stats_req->
-                                       query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+                                       query[first_queue_query_index + i];
 
                cur_query_entry->kind = STATS_TYPE_QUEUE;
                cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 
                cur_data_offset += sizeof(struct per_queue_stats);
        }
+
+       /* add FCoE queue query if needed */
+       if (!NO_FCOE(bp)) {
+               cur_query_entry =
+                       &bp->fw_stats_req->
+                                       query[first_queue_query_index + i];
+
+               cur_query_entry->kind = STATS_TYPE_QUEUE;
+               cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
 }
 
 void bnx2x_stats_init(struct bnx2x *bp)
index 5d8ce2f6afef13779aad7442e147d8dab7ba6aa9..683deb053109d817aa578906f4f6874c0ec994b5 100644 (file)
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
        u32 total_tpa_aggregated_frames_lo;
        u32 total_tpa_bytes_hi;
        u32 total_tpa_bytes_lo;
+
+       /* PFC */
+       u32 pfc_frames_received_hi;
+       u32 pfc_frames_received_lo;
+       u32 pfc_frames_sent_hi;
+       u32 pfc_frames_sent_lo;
 };
 
 
index 6f10c6939834df7cd68f246175bbd9aa094110f6..4bcb67eedf1ebba563b85873c5330410939aa46d 100644 (file)
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
        return io->data;
 }
 
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+
+       if (reg)
+               info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+       else
+               info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+       info.data.ulp_type = ulp_type;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
 static int cnic_in_use(struct cnic_sock *csk)
 {
        return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
        }
        read_unlock(&cnic_dev_lock);
 
-       rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+       RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 
        mutex_unlock(&cnic_lock);
        synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        mutex_unlock(&cnic_lock);
 
+       cnic_ulp_ctl(dev, ulp_type, true);
+
        return 0;
 
 }
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        }
        mutex_lock(&cnic_lock);
        if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-               rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+               RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
                cnic_put(dev);
        } else {
                pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
                netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
+       cnic_ulp_ctl(dev, ulp_type, false);
+
        return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -3052,9 +3071,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
        }
 }
 
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_ulp_ops *ulp_ops;
+       int rc;
+
+       mutex_lock(&cnic_lock);
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+       if (ulp_ops && ulp_ops->cnic_get_stats)
+               rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+       else
+               rc = -ENODEV;
+       mutex_unlock(&cnic_lock);
+       return rc;
+}
+
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 {
        struct cnic_dev *dev = data;
+       int ulp_type = CNIC_ULP_ISCSI;
 
        switch (info->cmd) {
        case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3136,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
                }
                break;
        }
+       case CNIC_CTL_FCOE_STATS_GET_CMD:
+               ulp_type = CNIC_ULP_FCOE;
+               /* fall through */
+       case CNIC_CTL_ISCSI_STATS_GET_CMD:
+               cnic_hold(dev);
+               cnic_copy_ulp_stats(dev, ulp_type);
+               cnic_put(dev);
+               break;
+
        default:
                return -EINVAL;
        }
@@ -3475,7 +3520,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+       fl6.daddr = dst_addr->sin6_addr;
        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                fl6.flowi6_oif = dst_addr->sin6_scope_id;
 
@@ -5134,7 +5179,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
                }
                cnic_shutdown_rings(dev);
                clear_bit(CNIC_F_CNIC_UP, &dev->flags);
-               rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+               RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
                synchronize_rcu();
                cnic_cm_shutdown(dev);
                cp->stop_hw(dev);
@@ -5288,6 +5333,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;
 
+       cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
index 79443e0dbf962586526ffda570057379dcec791c..d1f6456d22bb68d568fb64e1a1254fd394946f83 100644 (file)
@@ -86,6 +86,8 @@ struct kcqe {
 #define CNIC_CTL_START_CMD             2
 #define CNIC_CTL_COMPLETION_CMD                3
 #define CNIC_CTL_STOP_ISCSI_CMD                4
+#define CNIC_CTL_FCOE_STATS_GET_CMD    5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD   6
 
 #define DRV_CTL_IO_WR_CMD              0x101
 #define DRV_CTL_IO_RD_CMD              0x102
@@ -96,6 +98,8 @@ struct kcqe {
 #define DRV_CTL_STOP_L2_CMD            0x107
 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD  0x10c
 #define DRV_CTL_ISCSI_STOPPED_CMD      0x10d
+#define DRV_CTL_ULP_REGISTER_CMD       0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD     0x10f
 
 struct cnic_ctl_completion {
        u32     cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
                struct drv_ctl_spq_credit credit;
                struct drv_ctl_io io;
                struct drv_ctl_l2_ring ring;
+               int ulp_type;
                char bytes[MAX_DRV_CTL_DATA];
        } data;
 };
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
                                               struct kwqe_16 *[], u32);
        int             (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
        unsigned long   reserved1[2];
+       union drv_info_to_mcp   *addr_drv_info_to_mcp;
 };
 
 struct cnic_sockaddr {
@@ -297,6 +303,8 @@ struct cnic_dev {
        int             max_fcoe_conn;
        int             max_rdma_conn;
 
+       union drv_info_to_mcp   *stats_addr;
+
        void            *cnic_priv;
 };
 
@@ -326,6 +334,7 @@ struct cnic_ulp_ops {
        void (*cm_remote_abort)(struct cnic_sock *);
        int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
                                  char *data, u16 data_size);
+       int (*cnic_get_stats)(void *ulp_ctx);
        struct module *owner;
        atomic_t ref_count;
 };
index 0a1d7f279fc8fd83565915a2daaed7bcc3df2621..8fa7abc53ec69a38fce149dbf37acb5eca2a2cf1 100644 (file)
@@ -163,7 +163,6 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR      256
 #define SBMAC_MAX_RXDESCR      256
 
-#define ETHER_ADDR_LEN         6
 #define ENET_PACKET_SIZE       1518
 /*#define ENET_PACKET_SIZE     9216 */
 
@@ -266,7 +265,7 @@ struct sbmac_softc {
        int                     sbm_pause;      /* current pause setting */
        int                     sbm_link;       /* current link state */
 
-       unsigned char           sbm_hwaddr[ETHER_ADDR_LEN];
+       unsigned char           sbm_hwaddr[ETH_ALEN];
 
        struct sbmacdma         sbm_txdma;      /* only channel 0 for now */
        struct sbmacdma         sbm_rxdma;
@@ -2676,15 +2675,4 @@ static struct platform_driver sbmac_driver = {
        },
 };
 
-static int __init sbmac_init_module(void)
-{
-       return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
-       platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
index bf4074167d6a321e8f5888c2e2909b0b71e1f62b..076e02a415a09d09878db3ea1c0071c03ac4a685 100644 (file)
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    121
+#define TG3_MIN_NUM                    122
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "November 2, 2011"
+#define DRV_MODULE_RELDATE     "December 7, 2011"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING  100
-#define TG3_RSS_INDIR_TBL_SIZE         128
 
 /* Do not place this n-ring entries value into the tp struct itself,
  * we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 #if (NET_IP_ALIGN != 0)
 #define TG3_RX_OFFSET(tp)      ((tp)->rx_offset)
 #else
-#define TG3_RX_OFFSET(tp)      0
+#define TG3_RX_OFFSET(tp)      (NET_SKB_PAD)
 #endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX              4096
+#define TG3_TX_BD_DMA_MAX_2K           2048
+#define TG3_TX_BD_DMA_MAX_4K           4096
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
        }
 }
 
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
-       u16 miireg;
-
-       if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
-               miireg = ADVERTISE_PAUSE_CAP;
-       else if (flow_ctrl & FLOW_CTRL_TX)
-               miireg = ADVERTISE_PAUSE_ASYM;
-       else if (flow_ctrl & FLOW_CTRL_RX)
-               miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-       else
-               miireg = 0;
-
-       return miireg;
-}
-
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 {
        u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 {
        u8 cap = 0;
 
-       if (lcladv & ADVERTISE_1000XPAUSE) {
-               if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-                       if (rmtadv & LPA_1000XPAUSE)
-                               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-                       else if (rmtadv & LPA_1000XPAUSE_ASYM)
-                               cap = FLOW_CTRL_RX;
-               } else {
-                       if (rmtadv & LPA_1000XPAUSE)
-                               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-               }
-       } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-               if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+       if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+       } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+               if (lcladv & ADVERTISE_1000XPAUSE)
+                       cap = FLOW_CTRL_RX;
+               if (rmtadv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_TX;
        }
 
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
-                       lcl_adv = tg3_advert_flowctrl_1000T(
+                       lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);
 
                        if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+            tg3_flag(tp, 57765_CLASS)) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
        bool need_vaux = false;
 
        /* The GPIOs do something completely different on 57765. */
-       if (!tg3_flag(tp, IS_NIC) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
        u32 val, new_adv;
 
        new_adv = ADVERTISE_CSMA;
-       if (advertise & ADVERTISED_10baseT_Half)
-               new_adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               new_adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               new_adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               new_adv |= ADVERTISE_100FULL;
-
-       new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+       new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+       new_adv |= mii_advertise_flowctrl(flowctrl);
 
        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;
 
-       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-               goto done;
-
-       new_adv = 0;
-       if (advertise & ADVERTISED_1000baseT_Half)
-               new_adv |= ADVERTISE_1000HALF;
-       if (advertise & ADVERTISED_1000baseT_Full)
-               new_adv |= ADVERTISE_1000FULL;
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-               new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+                       new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-       err = tg3_writephy(tp, MII_CTRL1000, new_adv);
-       if (err)
-               goto done;
+               err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+               if (err)
+                       goto done;
+       }
 
        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
+               case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 {
-       u32 adv_reg, all_mask = 0;
+       u32 advmsk, tgtadv, advertising;
 
-       if (mask & ADVERTISED_10baseT_Half)
-               all_mask |= ADVERTISE_10HALF;
-       if (mask & ADVERTISED_10baseT_Full)
-               all_mask |= ADVERTISE_10FULL;
-       if (mask & ADVERTISED_100baseT_Half)
-               all_mask |= ADVERTISE_100HALF;
-       if (mask & ADVERTISED_100baseT_Full)
-               all_mask |= ADVERTISE_100FULL;
+       advertising = tp->link_config.advertising;
+       tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
-               return 0;
+       advmsk = ADVERTISE_ALL;
+       if (tp->link_config.active_duplex == DUPLEX_FULL) {
+               tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+               advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+       }
 
-       if ((adv_reg & ADVERTISE_ALL) != all_mask)
-               return 0;
+       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+               return false;
+
+       if ((*lcladv & advmsk) != tgtadv)
+               return false;
 
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
-               all_mask = 0;
-               if (mask & ADVERTISED_1000baseT_Half)
-                       all_mask |= ADVERTISE_1000HALF;
-               if (mask & ADVERTISED_1000baseT_Full)
-                       all_mask |= ADVERTISE_1000FULL;
+               tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 
                if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
-                       return 0;
+                       return false;
 
                tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
-               if (tg3_ctrl != all_mask)
-                       return 0;
+               if (tg3_ctrl != tgtadv)
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 {
-       u32 curadv, reqadv;
+       u32 lpeth = 0;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
-               return 1;
-
-       curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-       reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               u32 val;
 
-       if (tp->link_config.active_duplex == DUPLEX_FULL) {
-               if (curadv != reqadv)
-                       return 0;
+               if (tg3_readphy(tp, MII_STAT1000, &val))
+                       return false;
 
-               if (tg3_flag(tp, PAUSE_AUTONEG))
-                       tg3_readphy(tp, MII_LPA, rmtadv);
-       } else {
-               /* Reprogram the advertisement register, even if it
-                * does not affect the current link.  If the link
-                * gets renegotiated in the future, we can save an
-                * additional renegotiation cycle by advertising
-                * it correctly in the first place.
-                */
-               if (curadv != reqadv) {
-                       *lcladv &= ~(ADVERTISE_PAUSE_CAP |
-                                    ADVERTISE_PAUSE_ASYM);
-                       tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
-               }
+               lpeth = mii_stat1000_to_ethtool_lpa_t(val);
        }
 
-       return 1;
+       if (tg3_readphy(tp, MII_LPA, rmtadv))
+               return false;
+
+       lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+       tp->link_config.rmt_adv = lpeth;
+
+       return true;
 }
 
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
+       tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+       tp->link_config.rmt_adv = 0;
 
        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
-                           tg3_copper_is_advertising_all(tp,
-                                               tp->link_config.advertising)) {
-                               if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
-                                                                 &rmt_adv))
-                                       current_link_up = 1;
-                       }
+                           tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+                           tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+                               current_link_up = 1;
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                }
 
                if (current_link_up == 1 &&
-                   tp->link_config.active_duplex == DUPLEX_FULL)
+                   tp->link_config.active_duplex == DUPLEX_FULL) {
+                       u32 reg, bit;
+
+                       if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+                               reg = MII_TG3_FET_GEN_STAT;
+                               bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+                       } else {
+                               reg = MII_TG3_EXT_STAT;
+                               bit = MII_TG3_EXT_STAT_MDIX;
+                       }
+
+                       if (!tg3_readphy(tp, reg, &val) && (val & bit))
+                               tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+               }
        }
 
 relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
 
                        current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        udelay(40);
 
        current_link_up = 0;
+       tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);
 
        if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
+       tp->link_config.rmt_adv = 0;
 
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
            (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-               u32 adv, new_adv;
+               u32 adv, newadv;
 
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
-               new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
-                                 ADVERTISE_1000XPAUSE |
-                                 ADVERTISE_1000XPSE_ASYM |
-                                 ADVERTISE_SLCT);
+               newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+                                ADVERTISE_1000XPAUSE |
+                                ADVERTISE_1000XPSE_ASYM |
+                                ADVERTISE_SLCT);
 
-               new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+               newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+               newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 
-               if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
-                       new_adv |= ADVERTISE_1000XHALF;
-               if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
-                       new_adv |= ADVERTISE_1000XFULL;
-
-               if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
-                       tg3_writephy(tp, MII_ADVERTISE, new_adv);
+               if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+                       tg3_writephy(tp, MII_ADVERTISE, newadv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);
 
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
+
+                               tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
                        } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
        if (tg3_flag(tp, ENABLE_TSS))
                index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
                        sw_idx = NEXT_TX(sw_idx);
                }
 
+               pkts_compl++;
+               bytes_compl += skb->len;
+
                dev_kfree_skb(skb);
 
                if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
                }
        }
 
+       netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
        tnapi->tx_cons = sw_idx;
 
        /* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
        }
 }
 
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
-       if (!ri->skb)
+       if (!ri->data)
                return;
 
        pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
                         map_sz, PCI_DMA_FROMDEVICE);
-       dev_kfree_skb_any(ri->skb);
-       ri->skb = NULL;
+       kfree(ri->data);
+       ri->data = NULL;
 }
 
 /* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
 {
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map;
-       struct sk_buff *skb;
+       u8 *data;
        dma_addr_t mapping;
-       int skb_size, dest_idx;
+       int skb_size, data_size, dest_idx;
 
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
-               skb_size = tp->rx_pkt_map_sz;
+               data_size = tp->rx_pkt_map_sz;
                break;
 
        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
-               skb_size = TG3_RX_JMB_MAP_SZ;
+               data_size = TG3_RX_JMB_MAP_SZ;
                break;
 
        default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
-       skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
-       if (skb == NULL)
+       skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+                  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       data = kmalloc(skb_size, GFP_ATOMIC);
+       if (!data)
                return -ENOMEM;
 
-       skb_reserve(skb, TG3_RX_OFFSET(tp));
-
-       mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+       mapping = pci_map_single(tp->pdev,
+                                data + TG3_RX_OFFSET(tp),
+                                data_size,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
-               dev_kfree_skb(skb);
+               kfree(data);
                return -EIO;
        }
 
-       map->skb = skb;
+       map->data = data;
        dma_unmap_addr_set(map, mapping, mapping);
 
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);
 
-       return skb_size;
+       return data_size;
 }
 
 /* We only need to move over in the address because the other
  * members of the RX descriptor are invariant.  See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
  */
 static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
                return;
        }
 
-       dest_map->skb = src_map->skb;
+       dest_map->data = src_map->data;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
         */
        smp_wmb();
 
-       src_map->skb = NULL;
+       src_map->data = NULL;
 }
 
 /* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
+               u8 *data;
 
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
-                       skb = ri->skb;
+                       data = ri->data;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
-                       skb = ri->skb;
+                       data = ri->data;
                        post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        goto next_pkt;
                }
 
+               prefetch(data + TG3_RX_OFFSET(tp));
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;
 
                if (len > TG3_RX_COPY_THRESH(tp)) {
                        int skb_size;
 
-                       skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+                       skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
                                                    *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);
 
-                       /* Ensure that the update to the skb happens
+                       skb = build_skb(data);
+                       if (!skb) {
+                               kfree(data);
+                               goto drop_it_no_recycle;
+                       }
+                       skb_reserve(skb, TG3_RX_OFFSET(tp));
+                       /* Ensure that the update to the data happens
                         * after the usage of the old DMA mapping.
                         */
                        smp_wmb();
 
-                       ri->skb = NULL;
+                       ri->data = NULL;
 
-                       skb_put(skb, len);
                } else {
-                       struct sk_buff *copy_skb;
-
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
-                       copy_skb = netdev_alloc_skb(tp->dev, len +
-                                                   TG3_RAW_IP_ALIGN);
-                       if (copy_skb == NULL)
+                       skb = netdev_alloc_skb(tp->dev,
+                                              len + TG3_RAW_IP_ALIGN);
+                       if (skb == NULL)
                                goto drop_it_no_recycle;
 
-                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
-                       skb_put(copy_skb, len);
+                       skb_reserve(skb, TG3_RAW_IP_ALIGN);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-                       skb_copy_from_linear_data(skb, copy_skb->data, len);
+                       memcpy(skb->data,
+                              data + TG3_RX_OFFSET(tp),
+                              len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
-                       /* We'll reuse the original ring buffer. */
-                       skb = copy_skb;
                }
 
+               skb_put(skb, len);
                if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
                di = dpr->rx_std_prod_idx;
 
                for (i = di; i < di + cpycnt; i++) {
-                       if (dpr->rx_std_buffers[i].skb) {
+                       if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
                di = dpr->rx_jmb_prod_idx;
 
                for (i = di; i < di + cpycnt; i++) {
-                       if (dpr->rx_jmb_buffers[i].skb) {
+                       if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
        bool hwbug = false;
 
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
-               hwbug = 1;
+               hwbug = true;
 
        if (tg3_4g_overflow_test(map, len))
-               hwbug = 1;
+               hwbug = true;
 
        if (tg3_40bit_overflow_test(tp, map, len))
-               hwbug = 1;
+               hwbug = true;
 
-       if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+       if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
-               while (len > TG3_TX_BD_DMA_MAX && *budget) {
-                       u32 frag_len = TG3_TX_BD_DMA_MAX;
-                       len -= TG3_TX_BD_DMA_MAX;
+               while (len > tp->dma_limit && *budget) {
+                       u32 frag_len = tp->dma_limit;
+                       len -= tp->dma_limit;
 
                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
-                               len += TG3_TX_BD_DMA_MAX / 2;
-                               frag_len = TG3_TX_BD_DMA_MAX / 2;
+                               len += tp->dma_limit / 2;
+                               frag_len = tp->dma_limit / 2;
                        }
 
                        tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
-                               hwbug = 1;
+                               hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
@@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        skb_tx_timestamp(skb);
+       netdev_sent_queue(tp->dev, skb->len);
 
        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
        return 0;
 }
 
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 {
        struct tg3 *tp = netdev_priv(dev);
 
@@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
        }
 }
 
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct tg3 *tp = netdev_priv(dev);
 
@@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
                tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
        if (tpr != &tp->napi[0].prodring) {
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
-                       tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                       tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);
 
                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
-                               tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                               tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }
@@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
        }
 
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
-               tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+               tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);
 
        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
-                       tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                       tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
 }
@@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+               if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
        }
 
        for (i = 0; i < tp->rx_jumbo_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+               if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp)
                        dev_kfree_skb_any(skb);
                }
        }
+       netdev_reset_queue(tp->dev);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }
-       if (tp->hw_stats)
-               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
        return err;
 }
@@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
-               if (tg3_flag(tp, PCI_EXPRESS))
-                       pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-               else {
-                       pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
-                                             tp->pci_cacheline_sz);
-                       pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
-                                             tp->pci_lat_timer);
-               }
+       if (!tg3_flag(tp, PCI_EXPRESS)) {
+               pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+                                     tp->pci_cacheline_sz);
+               pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+                                     tp->pci_lat_timer);
        }
 
        /* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp)
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                      val16);
 
-               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+                                                struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+                                               struct tg3_ethtool_stats *);
+
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);
 
+       if (tp->hw_stats) {
+               /* Save the stats across chip resets... */
+               tg3_get_stats64(tp->dev, &tp->net_stats_prev),
+               tg3_get_estats(tp, &tp->estats_prev);
+
+               /* And make sure the next sample is new data */
+               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+       }
+
        if (err)
                return err;
 
@@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
        if (!tg3_flag(tp, 5750_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           tg3_flag(tp, 57765_PLUS))
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                return;
 
-       if (!tg3_flag(tp, 5705_PLUS))
-               bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
-       else
-               bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+       bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 
        host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 
@@ -8231,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
                tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 }
 
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+       int i;
+
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               tp->rss_ind_tbl[i] =
+                       ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+       int i;
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return;
+
+       if (tp->irq_cnt <= 2) {
+               memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+               return;
+       }
+
+       /* Validate table against current IRQ count */
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+               if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+                       break;
+       }
+
+       if (i != TG3_RSS_INDIR_TBL_SIZE)
+               tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+       int i = 0;
+       u32 reg = MAC_RSS_INDIR_TBL_0;
+
+       while (i < TG3_RSS_INDIR_TBL_SIZE) {
+               u32 val = tp->rss_ind_tbl[i];
+               i++;
+               for (; i % 8; i++) {
+                       val <<= 4;
+                       val |= tp->rss_ind_tbl[i];
+               }
+               tw32(reg, val);
+               reg += 4;
+       }
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
@@ -8337,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(GRC_MODE, grc_mode);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tg3_flag(tp, 57765_CLASS)) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                        u32 grc_mode = tr32(GRC_MODE);
 
@@ -8425,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+               if (!tg3_flag(tp, 57765_CLASS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                        val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
                             val | BDINFO_FLAGS_USE_EXT_RECV);
                        if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                           tg3_flag(tp, 57765_CLASS))
                                tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
                                     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                } else {
@@ -8581,10 +8618,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                }
 
                if (tg3_flag(tp, 57765_PLUS)) {
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
-                               val = TG3_RX_STD_MAX_SIZE_5700;
-                       else
-                               val = TG3_RX_STD_MAX_SIZE_5717;
+                       val = TG3_RX_STD_RING_SIZE(tp);
                        val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
                        val |= (TG3_RX_STD_DMA_SZ << 2);
                } else
@@ -8661,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
@@ -8924,28 +8961,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        udelay(100);
 
        if (tg3_flag(tp, ENABLE_RSS)) {
-               int i = 0;
-               u32 reg = MAC_RSS_INDIR_TBL_0;
-
-               if (tp->irq_cnt == 2) {
-                       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
-                               tw32(reg, 0x0);
-                               reg += 4;
-                       }
-               } else {
-                       u32 val;
-
-                       while (i < TG3_RSS_INDIR_TBL_SIZE) {
-                               val = i % (tp->irq_cnt - 1);
-                               i++;
-                               for (; i % 8; i++) {
-                                       val <<= 4;
-                                       val |= (i % (tp->irq_cnt - 1));
-                               }
-                               tw32(reg, val);
-                               reg += 4;
-                       }
-               }
+               tg3_rss_write_indir_tbl(tp);
 
                /* Setup the "secret" hash key. */
                tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9018,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Prevent chip from dropping frames when flow control
         * is enabled.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tg3_flag(tp, 57765_CLASS))
                val = 1;
        else
                val = 2;
@@ -9217,7 +9233,7 @@ static void tg3_timer(unsigned long __opaque)
        spin_lock(&tp->lock);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
 
        if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9669,6 +9685,8 @@ static int tg3_open(struct net_device *dev)
         */
        tg3_ints_init(tp);
 
+       tg3_rss_check_indir_tbl(tp);
+
        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
@@ -9700,8 +9718,8 @@ static int tg3_open(struct net_device *dev)
                tg3_free_rings(tp);
        } else {
                if (tg3_flag(tp, TAGGED_STATUS) &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+                   !tg3_flag(tp, 57765_CLASS))
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;
@@ -9782,10 +9800,6 @@ err_out1:
        return err;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-                                                struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
 static int tg3_close(struct net_device *dev)
 {
        int i;
@@ -9817,10 +9831,9 @@ static int tg3_close(struct net_device *dev)
 
        tg3_ints_fini(tp);
 
-       tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
-       memcpy(&tp->estats_prev, tg3_get_estats(tp),
-              sizeof(tp->estats_prev));
+       /* Clear stats across close / open calls */
+       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
        tg3_napi_fini(tp);
 
@@ -9868,9 +9881,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+                                              struct tg3_ethtool_stats *estats)
 {
-       struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -10318,12 +10331,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
-       if (netif_running(dev)) {
+       if (netif_running(dev) && netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
+               cmd->lp_advertising = tp->link_config.rmt_adv;
+               if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+                       if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+                               cmd->eth_tp_mdix = ETH_TP_MDI_X;
+                       else
+                               cmd->eth_tp_mdix = ETH_TP_MDI;
+               }
        } else {
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
+               cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10449,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       strcpy(info->fw_version, tp->fw_ver);
-       strcpy(info->bus_info, pci_name(tp->pdev));
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+       strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
 }
 
 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10611,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
        epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_RX)
                epause->rx_pause = 1;
        else
                epause->rx_pause = 0;
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_TX)
                epause->tx_pause = 1;
        else
                epause->tx_pause = 0;
@@ -10715,6 +10736,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                        u32 *rules __always_unused)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               if (netif_running(tp->dev))
+                       info->data = tp->irq_cnt;
+               else {
+                       info->data = num_online_cpus();
+                       if (info->data > TG3_IRQ_MAX_VECS_RSS)
+                               info->data = TG3_IRQ_MAX_VECS_RSS;
+               }
+
+               /* The first interrupt vector only
+                * handles link interrupts.
+                */
+               info->data -= 1;
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+       u32 size = 0;
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (tg3_flag(tp, SUPPORT_MSIX))
+               size = TG3_RSS_INDIR_TBL_SIZE;
+
+       return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               indir[i] = tp->rss_ind_tbl[i];
+
+       return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       size_t i;
+
+       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+               tp->rss_ind_tbl[i] = indir[i];
+
+       if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+               return 0;
+
+       /* It is legal to write the indirection
+        * table while the device is running.
+        */
+       tg3_full_lock(tp, 0);
+       tg3_rss_write_indir_tbl(tp);
+       tg3_full_unlock(tp);
+
+       return 0;
+}
+
 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
@@ -10769,7 +10862,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
 {
        struct tg3 *tp = netdev_priv(dev);
-       memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+       tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
 }
 
 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11446,7 @@ static int tg3_test_memory(struct tg3 *tp)
 
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
@@ -11400,8 +11494,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
-       struct sk_buff *skb, *rx_skb;
-       u8 *tx_data;
+       struct sk_buff *skb;
+       u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11663,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                }
 
                if (opaque_key == RXD_OPAQUE_RING_STD) {
-                       rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+                       rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-                       rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+                       rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
@@ -11582,15 +11676,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);
 
+               rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
-                       if (*(rx_skb->data + i) != (u8) (val & 0xff))
+                       if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }
 
        err = 0;
 
-       /* tg3_free_rings will unmap and free the rx_skb */
+       /* tg3_free_rings will unmap and free the rx_data */
 out:
        return err;
 }
@@ -11943,6 +12038,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
+       .get_rxnfc              = tg3_get_rxnfc,
+       .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
+       .get_rxfh_indir         = tg3_get_rxfh_indir,
+       .set_rxfh_indir         = tg3_set_rxfh_indir,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12711,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                        tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13317,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
 
 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
 {
-       u32 adv = ADVERTISED_Autoneg |
-                 ADVERTISED_Pause;
+       u32 adv = ADVERTISED_Autoneg;
 
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13420,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
-               u32 bmsr, mask;
+               u32 bmsr, dummy;
 
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13433,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
                tg3_phy_set_wirespeed(tp);
 
-               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
-               if (!tg3_copper_is_advertising_all(tp, mask)) {
+               if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);
 
@@ -13460,6 +13555,17 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
+       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+                       strcpy(tp->board_part_number, "BCM57762");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+                       strcpy(tp->board_part_number, "BCM57766");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+                       strcpy(tp->board_part_number, "BCM57782");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+                       strcpy(tp->board_part_number, "BCM57786");
+               else
+                       goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
@@ -13798,7 +13904,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
-                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN15_PRODID_ASICREV,
                                              &prod_id_asic_rev);
@@ -13945,7 +14055,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, 5717_PLUS);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
-           tg3_flag(tp, 5717_PLUS))
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tg3_flag_set(tp, 57765_CLASS);
+
+       if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
                tg3_flag_set(tp, 57765_PLUS);
 
        /* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14110,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
-           (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+           tp->fw_needed) {
+               /* For firmware TSO, assume ASF is disabled.
+                * We'll disable TSO later if we discover ASF
+                * is enabled in tg3_get_eeprom_hw_cfg().
+                */
                tg3_flag_set(tp, TSO_CAPABLE);
-       else {
+       else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
@@ -14027,6 +14144,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
+                       tg3_rss_init_dflt_indir_tbl(tp);
                }
        }
 
@@ -14034,9 +14152,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
-               tg3_flag_set(tp, 4K_FIFO_LIMIT);
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
 
-       if (tg3_flag(tp, 5717_PLUS))
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);
 
        if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14178,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
                tg3_flag_set(tp, PCI_EXPRESS);
 
-               tp->pcie_readrq = 4096;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
-                       tp->pcie_readrq = 2048;
-
-               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+                       int readrq = pcie_get_readrq(tp->pdev);
+                       if (readrq > 2048)
+                               pcie_set_readrq(tp->pdev, 2048);
+               }
 
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14394,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         */
        tg3_get_eeprom_hw_cfg(tp);
 
+       if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+               tg3_flag_clear(tp, TSO_CAPABLE);
+               tg3_flag_clear(tp, TSO_BUG);
+               tp->fw_needed = NULL;
+       }
+
        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
@@ -14311,7 +14438,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14675,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        else
                tg3_flag_clear(tp, POLL_SERDES);
 
-       tp->rx_offset = NET_IP_ALIGN;
+       tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            tg3_flag(tp, PCIX_MODE)) {
-               tp->rx_offset = 0;
+               tp->rx_offset = NET_SKB_PAD;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -15313,7 +15440,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
-       u32 features = 0;
+       netdev_features_t features = 0;
 
        printk_once(KERN_INFO "%s\n", version);
 
index 94b4bd049a33a8e29534e423e560faedadd94fc7..aea8f72c24fa15a4eb5c1ca0ea527c374ed434ff 100644 (file)
@@ -31,6 +31,8 @@
 #define TG3_RX_RET_MAX_SIZE_5705       512
 #define TG3_RX_RET_MAX_SIZE_5717       4096
 
+#define TG3_RSS_INDIR_TBL_SIZE         128
+
 /* First 256 bytes are a mirror of PCI config space. */
 #define TG3PCI_VENDOR                  0x00000000
 #define  TG3PCI_VENDOR_BROADCOM                 0x14e4
 #define  TG3PCI_DEVICE_TIGON3_57795     0x16b6
 #define  TG3PCI_DEVICE_TIGON3_5719      0x1657
 #define  TG3PCI_DEVICE_TIGON3_5720      0x165f
+#define  TG3PCI_DEVICE_TIGON3_57762     0x1682
+#define  TG3PCI_DEVICE_TIGON3_57766     0x1686
+#define  TG3PCI_DEVICE_TIGON3_57786     0x16b3
+#define  TG3PCI_DEVICE_TIGON3_57782     0x16b7
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM           PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6   0x1644
 #define   ASIC_REV_57765                0x57785
 #define   ASIC_REV_5719                         0x5719
 #define   ASIC_REV_5720                         0x5720
+#define   ASIC_REV_57766                0x57766
 #define  GET_CHIP_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX               0x70
 #define   CHIPREV_5700_BX               0x71
 #define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB  0x00002000
 #define  RDMAC_MODE_FIFO_SIZE_128       0x00020000
 #define  RDMAC_MODE_FIFO_LONG_BURST     0x00030000
+#define  RDMAC_MODE_JMB_2K_MMRR                 0x00800000
 #define  RDMAC_MODE_MULT_DMA_RD_DIS     0x01000000
 #define  RDMAC_MODE_IPV4_LSO_EN                 0x08000000
 #define  RDMAC_MODE_IPV6_LSO_EN                 0x10000000
 #define  MII_TG3_EXT_CTRL_TBI          0x8000
 
 #define MII_TG3_EXT_STAT               0x11 /* Extended status register */
+#define  MII_TG3_EXT_STAT_MDIX         0x2000
 #define  MII_TG3_EXT_STAT_LPASS                0x0100
 
 #define MII_TG3_RXR_COUNTERS           0x14 /* Local/Remote Receiver Counts */
 #define  MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
 #define  MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
 
+#define MII_TG3_FET_GEN_STAT           0x1c
+#define  MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
 #define MII_TG3_FET_TEST               0x1f
 #define  MII_TG3_FET_SHADOW_EN         0x0080
 
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
 /* 'mapping' is superfluous as the chip does not write into
  * the tx/rx post rings so we could just fetch it from there.
  * But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
  */
 struct ring_info {
-       struct sk_buff                  *skb;
+       u8                              *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
 #define DUPLEX_INVALID         0xff
 #define AUTONEG_INVALID                0xff
        u16                             active_speed;
+       u32                             rmt_adv;
 
        /* When we go in and out of low power mode we need
         * to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
        TG3_FLAG_NVRAM_BUFFERED,
        TG3_FLAG_SUPPORT_MSI,
        TG3_FLAG_SUPPORT_MSIX,
+       TG3_FLAG_USING_MSI,
+       TG3_FLAG_USING_MSIX,
        TG3_FLAG_PCIX_MODE,
        TG3_FLAG_PCI_HIGH_SPEED,
        TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
        TG3_FLAG_CHIP_RESETTING,
        TG3_FLAG_INIT_COMPLETE,
        TG3_FLAG_TSO_BUG,
-       TG3_FLAG_IS_5788,
        TG3_FLAG_MAX_RXPEND_64,
        TG3_FLAG_TSO_CAPABLE,
        TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
        TG3_FLAG_IS_NIC,
        TG3_FLAG_FLASH,
        TG3_FLAG_HW_TSO_1,
-       TG3_FLAG_5705_PLUS,
-       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_HW_TSO_3,
-       TG3_FLAG_USING_MSI,
-       TG3_FLAG_USING_MSIX,
        TG3_FLAG_ICH_WORKAROUND,
-       TG3_FLAG_5780_CLASS,
-       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_1SHOT_MSI,
        TG3_FLAG_NO_FWARE_REPORTED,
        TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
        TG3_FLAG_RGMII_EXT_IBND_RX_EN,
        TG3_FLAG_RGMII_EXT_IBND_TX_EN,
        TG3_FLAG_CLKREQ_BUG,
-       TG3_FLAG_5755_PLUS,
        TG3_FLAG_NO_NVRAM,
        TG3_FLAG_ENABLE_RSS,
        TG3_FLAG_ENABLE_TSS,
        TG3_FLAG_SHORT_DMA_BUG,
        TG3_FLAG_USE_JUMBO_BDFLAG,
        TG3_FLAG_L1PLLPD_EN,
-       TG3_FLAG_57765_PLUS,
        TG3_FLAG_APE_HAS_NCSI,
-       TG3_FLAG_5717_PLUS,
        TG3_FLAG_4K_FIFO_LIMIT,
        TG3_FLAG_RESET_TASK_PENDING,
+       TG3_FLAG_5705_PLUS,
+       TG3_FLAG_IS_5788,
+       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_5780_CLASS,
+       TG3_FLAG_5755_PLUS,
+       TG3_FLAG_57765_PLUS,
+       TG3_FLAG_57765_CLASS,
+       TG3_FLAG_5717_PLUS,
 
        /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
        TG3_FLAG_NUMBER_OF_FLAGS,       /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
        /* begin "tx thread" cacheline section */
        void                            (*write32_tx_mbox) (struct tg3 *, u32,
                                                            u32);
+       u32                             dma_limit;
 
        /* begin "rx thread" cacheline section */
        struct tg3_napi                 napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
        unsigned long                   rx_dropped;
        unsigned long                   tx_dropped;
        struct rtnl_link_stats64        net_stats_prev;
-       struct tg3_ethtool_stats        estats;
        struct tg3_ethtool_stats        estats_prev;
 
        DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
 #define TG3_PHYFLG_SERDES_PREEMPHASIS  0x00010000
 #define TG3_PHYFLG_PARALLEL_DETECT     0x00020000
 #define TG3_PHYFLG_EEE_CAP             0x00040000
+#define TG3_PHYFLG_MDIX_STATE          0x00200000
 
        u32                             led_ctrl;
        u32                             phy_otp;
        u32                             setlpicnt;
+       u8                              rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
 
 #define TG3_BPN_SIZE                   24
        char                            board_part_number[TG3_BPN_SIZE];
index 74d3abca1960119fe681d2155d190828845b2009..6027302ae73aca095e29e6ee44e7808aa38cc973 100644 (file)
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BNA) += bna.o
 
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
 
index 8e627186507ceee29a5f281b6c2f36b52850f0b4..29f284f79e02ace82f607509a5b27d21c3cb993b 100644 (file)
@@ -184,6 +184,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
                (dma_kva + bfa_cee_attr_meminfo());
 }
 
+/**
+ * bfa_cee_get_attr()
+ *
+ * @brief      Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in]  Pointer to the CEE module data structure.
+ *
+ * @return     Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+                   bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req *cmd;
+
+       BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+       if (!bfa_nw_ioc_is_operational(cee->ioc))
+               return BFA_STATUS_IOC_FAILURE;
+
+       if (cee->get_attr_pending == true)
+               return  BFA_STATUS_DEVBUSY;
+
+       cee->get_attr_pending = true;
+       cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+       cee->attr = attr;
+       cee->cbfn.get_attr_cbfn = cbfn;
+       cee->cbfn.get_attr_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+                   bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+       bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+       return BFA_STATUS_OK;
+}
+
 /**
  * bfa_cee_isrs()
  *
index 58d54e98d595d72d7b7b99379d1cfeb3ba9d6b37..93fde633d6f33762eae9691610ae27e17872f705 100644 (file)
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
 void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
        u64 dma_pa);
 void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+                               struct bfa_cee_attr *attr,
+                               bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
 #endif /* __BFA_CEE_H__ */
index 2f12d68021d57d4ee7924cb45746bca5acf6be30..871c6309334c60b72d3b819aa10178ed231a4109 100644 (file)
@@ -219,41 +219,39 @@ enum {
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block {
-       u8              version;        /*!< manufacturing block version */
-       u8              mfg_sig[3];     /*!< characters 'M', 'F', 'G' */
-       u16     mfgsize;        /*!< mfg block size */
-       u16     u16_chksum;     /*!< old u16 checksum */
-       char            brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
-       char            brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
-       u8              mfg_day;        /*!< manufacturing day */
-       u8              mfg_month;      /*!< manufacturing month */
-       u16     mfg_year;       /*!< manufacturing year */
-       u64             mfg_wwn;        /*!< wwn base for this adapter */
-       u8              num_wwn;        /*!< number of wwns assigned */
-       u8              mfg_speeds;     /*!< speeds allowed for this adapter */
-       u8              rsv[2];
-       char            supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
-       char            supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
-       char
-               supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
-       char
-               supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-       mac_t           mfg_mac;        /*!< mac address */
-       u8              num_mac;        /*!< number of mac addresses */
-       u8              rsv2;
-       u32             card_type;      /*!< card type */
-       char            cap_nic;        /*!< capability nic */
-       char            cap_cna;        /*!< capability cna */
-       char            cap_hba;        /*!< capability hba */
-       char            cap_fc16g;      /*!< capability fc 16g */
-       char            cap_sriov;      /*!< capability sriov */
-       char            cap_mezz;       /*!< capability mezz */
-       u8              rsv3;
-       u8              mfg_nports;     /*!< number of ports */
-       char            media[8];       /*!< xfi/xaui */
-       char            initial_mode[8];/*!< initial mode: hba/cna/nic */
-       u8              rsv4[84];
-       u8              md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+       u8      version;        /* manufacturing block version */
+       u8      mfg_sig[3];     /* characters 'M', 'F', 'G' */
+       u16     mfgsize;        /* mfg block size */
+       u16     u16_chksum;     /* old u16 checksum */
+       char    brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+       char    brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+       u8      mfg_day;        /* manufacturing day */
+       u8      mfg_month;      /* manufacturing month */
+       u16     mfg_year;       /* manufacturing year */
+       u64     mfg_wwn;        /* wwn base for this adapter */
+       u8      num_wwn;        /* number of wwns assigned */
+       u8      mfg_speeds;     /* speeds allowed for this adapter */
+       u8      rsv[2];
+       char    supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+       char    supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+       char    supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+       char    supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+       mac_t   mfg_mac;        /* base mac address */
+       u8      num_mac;        /* number of mac addresses */
+       u8      rsv2;
+       u32     card_type;      /* card type          */
+       char    cap_nic;        /* capability nic     */
+       char    cap_cna;        /* capability cna     */
+       char    cap_hba;        /* capability hba     */
+       char    cap_fc16g;      /* capability fc 16g      */
+       char    cap_sriov;      /* capability sriov       */
+       char    cap_mezz;       /* capability mezz        */
+       u8      rsv3;
+       u8      mfg_nports;     /* number of ports        */
+       char    media[8];       /* xfi/xaui           */
+       char    initial_mode[8]; /* initial mode: hba/cna/nic */
+       u8      rsv4[84];
+       u8      md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
 };
 
 #pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
        BFA_MODE_NIC            = 3
 };
 
+/*
+ *     Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE      32      /* partition entry size */
+#define BFA_FLASH_PART_MAX             32      /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE           0x400000
+#define BFA_FLASH_PART_MFG             7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+       u32     part_type;      /* partition type */
+       u32     part_instance;  /* partition instance */
+       u32     part_off;       /* partition offset */
+       u32     part_size;      /* partition size */
+       u32     part_len;       /* partition content length */
+       u32     part_status;    /* partition status */
+       char    rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+       u32     status; /* flash overall status */
+       u32     npart;  /* num of partitions */
+       struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
 #endif /* __BFA_DEFS_H__ */
index b0307a00a109e06ca876e25d42c44773b51250be..abfad275b5f35e610bec06075c211720b3ff20f0 100644 (file)
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
 {
+       bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
                bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in]  ioc     memory for IOC
+ * @param[in]  tbuf    app memory to store data from smem
+ * @param[in]  soff    smem offset
+ * @param[in]  sz      size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+       u32 pgnum, loff, r32;
+       int i, len;
+       u32 *buf = tbuf;
+
+       pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+       loff = PSS_SMEM_PGOFF(soff);
+
+       /*
+        *  Hold semaphore to serialize pll init and fwtrc.
+       */
+       if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+               return 1;
+
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+       len = sz/sizeof(u32);
+       for (i = 0; i < len; i++) {
+               r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+               buf[i] = be32_to_cpu(r32);
+               loff += sizeof(u32);
+
+               /**
+                * handle page offset wrap around
+                */
+               loff = PSS_SMEM_PGOFF(loff);
+               if (loff == 0) {
+                       pgnum++;
+                       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+               }
+       }
+
+       writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+              ioc->ioc_regs.host_page_num_fn);
+
+       /*
+        * release semaphore
+        */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
+       writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+       return 0;
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+       u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+       int tlen, status = 0;
+
+       tlen = *trclen;
+       if (tlen > BNA_DBG_FWTRC_LEN)
+               tlen = BNA_DBG_FWTRC_LEN;
+
+       status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+       *trclen = tlen;
+       return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+       int tlen;
+
+       if (ioc->dbg_fwsave_once) {
+               ioc->dbg_fwsave_once = 0;
+               if (ioc->dbg_fwsave_len) {
+                       tlen = ioc->dbg_fwsave_len;
+                       bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+               }
+       }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+       int tlen;
+
+       if (ioc->dbg_fwsave_len == 0)
+               return BFA_STATUS_ENOFSAVE;
+
+       tlen = *trclen;
+       if (tlen > ioc->dbg_fwsave_len)
+               tlen = ioc->dbg_fwsave_len;
+
+       memcpy(trcdata, ioc->dbg_fwsave, tlen);
+       *trclen = tlen;
+       return BFA_STATUS_OK;
+}
+
 static void
 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
 {
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+       bfa_nw_ioc_debug_save_ftrc(ioc);
 }
 
 /**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+       ioc->dbg_fwsave = dbg_fwsave;
+       ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
 static u32
 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
 {
@@ -2171,6 +2292,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
+/**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
 /**
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
                        msecs_to_jiffies(BFA_IOC_POLL_TOV));
        }
 }
+
+/*
+ *     Flash module specific
+ */
+
+/*
+ * FLASH DMA buffer should be big enough to hold both MFG block and
+ * asic block(64k) at the same time and also should be 2k aligned to
+ * avoid write segement to cross sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ       2048
+#define BFA_FLASH_DMA_BUF_SZ   \
+       roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+       flash->op_busy = 0;
+       if (flash->cbfn)
+               flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+       struct bfa_flash *flash = cbarg;
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (flash->op_busy) {
+                       flash->status = BFA_STATUS_IOC_FAILURE;
+                       flash->cbfn(flash->cbarg, flash->status);
+                       flash->op_busy = 0;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+       struct bfi_flash_write_req *msg =
+                       (struct bfi_flash_write_req *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+              flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == flash->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+       flash->residue -= len;
+       flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+       struct bfa_flash *flash = cbarg;
+       struct bfi_flash_read_req *msg =
+                       (struct bfi_flash_read_req *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+              flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+       struct bfa_flash *flash = flasharg;
+       u32     status;
+
+       union {
+               struct bfi_flash_query_rsp *query;
+               struct bfi_flash_write_rsp *write;
+               struct bfi_flash_read_rsp *read;
+               struct bfi_mbmsg   *msg;
+       } m;
+
+       m.msg = msg;
+
+       /* receiving response after ioc failure */
+       if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+               return;
+
+       switch (msg->mh.msg_id) {
+       case BFI_FLASH_I2H_QUERY_RSP:
+               status = be32_to_cpu(m.query->status);
+               if (status == BFA_STATUS_OK) {
+                       u32     i;
+                       struct bfa_flash_attr *attr, *f;
+
+                       attr = (struct bfa_flash_attr *) flash->ubuf;
+                       f = (struct bfa_flash_attr *) flash->dbuf_kva;
+                       attr->status = be32_to_cpu(f->status);
+                       attr->npart = be32_to_cpu(f->npart);
+                       for (i = 0; i < attr->npart; i++) {
+                               attr->part[i].part_type =
+                                       be32_to_cpu(f->part[i].part_type);
+                               attr->part[i].part_instance =
+                                       be32_to_cpu(f->part[i].part_instance);
+                               attr->part[i].part_off =
+                                       be32_to_cpu(f->part[i].part_off);
+                               attr->part[i].part_size =
+                                       be32_to_cpu(f->part[i].part_size);
+                               attr->part[i].part_len =
+                                       be32_to_cpu(f->part[i].part_len);
+                               attr->part[i].part_status =
+                                       be32_to_cpu(f->part[i].part_status);
+                       }
+               }
+               flash->status = status;
+               bfa_flash_cb(flash);
+               break;
+       case BFI_FLASH_I2H_WRITE_RSP:
+               status = be32_to_cpu(m.write->status);
+               if (status != BFA_STATUS_OK || flash->residue == 0) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else
+                       bfa_flash_write_send(flash);
+               break;
+       case BFI_FLASH_I2H_READ_RSP:
+               status = be32_to_cpu(m.read->status);
+               if (status != BFA_STATUS_OK) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else {
+                       u32 len = be32_to_cpu(m.read->length);
+                       memcpy(flash->ubuf + flash->offset,
+                              flash->dbuf_kva, len);
+                       flash->residue -= len;
+                       flash->offset += len;
+                       if (flash->residue == 0) {
+                               flash->status = status;
+                               bfa_flash_cb(flash);
+                       } else
+                               bfa_flash_read_send(flash);
+               }
+               break;
+       case BFI_FLASH_I2H_BOOT_VER_RSP:
+       case BFI_FLASH_I2H_EVENT:
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+       return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+       flash->ioc = ioc;
+       flash->cbfn = NULL;
+       flash->cbarg = NULL;
+       flash->op_busy = 0;
+
+       bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+       bfa_q_qe_init(&flash->ioc_notify);
+       bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+       list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+       flash->dbuf_kva = dm_kva;
+       flash->dbuf_pa = dm_pa;
+       memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+       dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+                     bfa_cb_flash cbfn, void *cbarg)
+{
+       struct bfi_flash_query_req *msg =
+                       (struct bfi_flash_query_req *) flash->mb.msg;
+
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->ubuf = (u8 *) attr;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+                   bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+       bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+                        void *buf, u32 len, u32 offset,
+                        bfa_cb_flash cbfn, void *cbarg)
+{
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be in word (4-byte) boundary
+        */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (type == BFA_FLASH_PART_MFG)
+               return BFA_STATUS_EINVAL;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+
+       bfa_flash_write_send(flash);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+                      void *buf, u32 len, u32 offset,
+                      bfa_cb_flash cbfn, void *cbarg)
+{
+       if (!bfa_nw_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be in word (4-byte) boundary
+        */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (flash->op_busy)
+               return BFA_STATUS_DEVBUSY;
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+
+       bfa_flash_read_send(flash);
+
+       return BFA_STATUS_OK;
+}
index ca158d1eaef3a61718ff15c4101a81ee596fe851..3b4460fdc148b09d269efa9359080c8ffcf7626d 100644 (file)
@@ -27,6 +27,8 @@
 #define BFA_IOC_HWSEM_TOV      500     /* msecs */
 #define BFA_IOC_HB_TOV         500     /* msecs */
 #define BFA_IOC_POLL_TOV       200     /* msecs */
+#define BNA_DBG_FWTRC_LEN      (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+                               BFI_IOC_TRC_HDR_SZ)
 
 /**
  * PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
        dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
 }
 
+#define bfa_alen_set(__alen, __len, __pa)      \
+       __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+       alen->al_len = cpu_to_be32(len);
+       bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
 struct bfa_ioc_regs {
        void __iomem *hfn_mbox_cmd;
        void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
 bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
        struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
 bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
                        struct bfi_ioc_image_hdr *fwhdr);
 mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
 
 /*
  * Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
 u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
 u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
 
+/*
+ *     Flash module specific
+ */
+typedef void   (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+       struct bfa_ioc *ioc;            /* back pointer to ioc */
+       u32             type;           /* partition type */
+       u8              instance;       /* partition instance */
+       u8              rsv[3];
+       u32             op_busy;        /*  operation busy flag */
+       u32             residue;        /*  residual length */
+       u32             offset;         /*  offset */
+       enum bfa_status status;         /*  status */
+       u8              *dbuf_kva;      /*  dma buf virtual address */
+       u64             dbuf_pa;        /*  dma buf physical address */
+       bfa_cb_flash    cbfn;           /*  user callback function */
+       void            *cbarg;         /*  user callback arg */
+       u8              *ubuf;          /*  user supplied buffer */
+       u32             addr_off;       /*  partition address offset */
+       struct bfa_mbox_cmd mb;         /*  mailbox */
+       struct bfa_ioc_notify ioc_notify; /*  ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+                       struct bfa_flash_attr *attr,
+                       bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+                       u32 type, u8 instance, void *buf, u32 len, u32 offset,
+                       bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+                       u32 type, u8 instance, void *buf, u32 len, u32 offset,
+                       bfa_cb_flash cbfn, void *cbarg);
+u32    bfa_nw_flash_meminfo(void);
+void   bfa_nw_flash_attach(struct bfa_flash *flash,
+                           struct bfa_ioc *ioc, void *dev);
+void   bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
 #endif /* __BFA_IOC_H__ */
index 7a1393aabd43c73ed667c6652e4531656e4062a4..0d9df695397a92d87efc571bf2ff4dff534df2fa 100644 (file)
@@ -83,6 +83,14 @@ union bfi_addr_u {
        } a32;
 };
 
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+       union bfi_addr_u        al_addr;        /* DMA addr of buffer   */
+       u32                     al_len;         /* length of buffer */
+};
+
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
  */
 #define BFI_IOC_TRC_OFF                (0x4b00)
 #define BFI_IOC_TRC_ENTS       256
+#define BFI_IOC_TRC_ENT_SZ     16
+#define BFI_IOC_TRC_HDR_SZ     32
 
 #define BFI_IOC_FW_SIGNATURE   (0xbfadbfad)
 #define BFI_IOC_MD5SUM_SZ      4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
        u16     len;
 };
 
+/*
+ *      FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+       BFI_FLASH_H2I_QUERY_REQ = 1,
+       BFI_FLASH_H2I_ERASE_REQ = 2,
+       BFI_FLASH_H2I_WRITE_REQ = 3,
+       BFI_FLASH_H2I_READ_REQ = 4,
+       BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+       BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+       BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+       BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+       BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+       struct bfi_mhdr mh;   /* Common msg header */
+       struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+       struct bfi_mhdr mh;     /* Common msg header */
+       struct bfi_alen alen;
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      last;
+       u8      rsv[2];
+       u32     offset;
+       u32     length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     offset;
+       u32     length;
+       struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+       struct bfi_mhdr mh;     /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
 #pragma pack()
 
 #endif /* __BFI_H__ */
index 26f5c5abfd1f50aced1931148dbb66b3dd81b161..9ccc586e37671cc664377f317f5b844218d037a4 100644 (file)
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
        bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
 
        kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+       bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
 
        /**
         * Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
        kva += bfa_nw_cee_meminfo();
        dma += bfa_nw_cee_meminfo();
 
+       bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+       bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+       kva += bfa_nw_flash_meminfo();
+       dma += bfa_nw_flash_meminfo();
+
        bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
        bfa_msgq_memclaim(&bna->msgq, kva, dma);
        bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
        res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
        res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
                                (bfa_nw_cee_meminfo() +
-                               bfa_msgq_meminfo()), PAGE_SIZE);
+                                bfa_nw_flash_meminfo() +
+                                bfa_msgq_meminfo()), PAGE_SIZE);
 
        /* DMA memory for retrieving IOC attributes */
        res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
        /* Virtual memory for retreiving fw_trc */
        res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
        res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
-       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
-       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+       res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
 
        /* DMA memory for retreiving stats */
        res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
index d090fbfb12fa4697c72093f46c6b8c12a62156b3..8e57fc5c586894d00b9ce6af8b145a0bf6b3b03c 100644 (file)
@@ -966,6 +966,7 @@ struct bna {
 
        struct bna_ioceth ioceth;
        struct bfa_cee cee;
+       struct bfa_flash flash;
        struct bfa_msgq msgq;
 
        struct bna_ethport ethport;
index 7f3091e7eb42f6c459f40b5471d3e57abacb8851..2eddbaa5db47a8266a1362117f2c55afe7f1cdcd 100644 (file)
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
 module_param(bnad_ioc_auto_recover, uint, 0444);
 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
 
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+                " Range[false:0|true:1]");
+
 /*
  * Global variables
  */
 u32 bnad_rxqs_per_cq = 2;
-
+u32 bna_id;
+struct mutex bnad_list_mutex;
+LIST_HEAD(bnad_list);
 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 /*
@@ -75,6 +82,23 @@ do {                                                         \
 
 #define BNAD_TXRX_SYNC_MDELAY  250     /* 250 msecs */
 
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+       mutex_lock(&bnad_list_mutex);
+       list_add_tail(&bnad->list_entry, &bnad_list);
+       bnad->id = bna_id++;
+       mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+       mutex_lock(&bnad_list_mutex);
+       list_del(&bnad->list_entry);
+       mutex_unlock(&bnad_list_mutex);
+}
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -723,7 +747,7 @@ void
 bnad_cb_ethport_link_status(struct bnad *bnad,
                        enum bna_link_status link_status)
 {
-       bool link_up = 0;
+       bool link_up = false;
 
        link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
        complete(&bnad->bnad_completions.mtu_comp);
 }
 
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+       struct bnad_iocmd_comp *iocmd_comp =
+                       (struct bnad_iocmd_comp *)arg;
+
+       iocmd_comp->comp_status = (u32) status;
+       complete(&iocmd_comp->comp);
+}
+
 /* Resource allocation, free functions */
 
 static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
        return err;
 }
 
-static void
+static int
 bnad_vlan_rx_add_vid(struct net_device *netdev,
                                 unsigned short vid)
 {
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
-static void
+static int
 bnad_vlan_rx_kill_vid(struct net_device *netdev,
                                  unsigned short vid)
 {
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
 {
        spin_lock_init(&bnad->bna_lock);
        mutex_init(&bnad->conf_mutex);
+       mutex_init(&bnad_list_mutex);
 }
 
 static void
 bnad_lock_uninit(struct bnad *bnad)
 {
        mutex_destroy(&bnad->conf_mutex);
+       mutex_destroy(&bnad_list_mutex);
 }
 
 /* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
                goto disable_device;
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               *using_dac = 1;
+               *using_dac = true;
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
                        if (err)
                                goto release_regions;
                }
-               *using_dac = 0;
+               *using_dac = false;
        }
        pci_set_master(pdev);
        return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
                return err;
        }
        bnad = netdev_priv(netdev);
-
        bnad_lock_init(bnad);
+       bnad_add_to_list(bnad);
 
        mutex_lock(&bnad->conf_mutex);
        /*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
        /* Set link to down state */
        netif_carrier_off(netdev);
 
+       /* Setup the debugfs node for this bfad */
+       if (bna_debugfs_enable)
+               bnad_debugfs_init(bnad);
+
        /* Get resource requirement form bna */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
 res_free:
        bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
 drv_uninit:
+       /* Remove the debugfs node for this bnad */
+       kfree(bnad->regdata);
+       bnad_debugfs_uninit(bnad);
        bnad_uninit(bnad);
 pci_uninit:
        bnad_pci_uninit(pdev);
 unlock_mutex:
        mutex_unlock(&bnad->conf_mutex);
+       bnad_remove_from_list(bnad);
        bnad_lock_uninit(bnad);
        free_netdev(netdev);
        return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
        bnad_disable_msix(bnad);
        bnad_pci_uninit(pdev);
        mutex_unlock(&bnad->conf_mutex);
+       bnad_remove_from_list(bnad);
        bnad_lock_uninit(bnad);
+       /* Remove the debugfs node for this bnad */
+       kfree(bnad->regdata);
+       bnad_debugfs_uninit(bnad);
        bnad_uninit(bnad);
        free_netdev(netdev);
 }
index 5487ca42d0185dae702cc14585fb6308a5f08d07..c975ce672f480ab48852939607ccdfe1b1714c49 100644 (file)
@@ -124,6 +124,12 @@ enum bnad_link_state {
        BNAD_LS_UP              = 1
 };
 
+struct bnad_iocmd_comp {
+       struct bnad             *bnad;
+       struct completion       comp;
+       int                     comp_status;
+};
+
 struct bnad_completion {
        struct completion       ioc_comp;
        struct completion       ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
 
 struct bnad {
        struct net_device       *netdev;
+       u32                     id;
+       struct list_head        list_entry;
 
        /* Data path */
        struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,6 +328,20 @@ struct bnad {
        char                    adapter_name[BNAD_NAME_LEN];
        char                    port_name[BNAD_NAME_LEN];
        char                    mbox_irq_name[BNAD_NAME_LEN];
+
+       /* debugfs specific data */
+       char    *regdata;
+       u32     reglen;
+       struct dentry *bnad_dentry_files[5];
+       struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+       struct bfa_ioc_attr  ioc_attr;
+       struct bfa_cee_attr  cee_attr;
+       struct bfa_flash_attr flash_attr;
+       u32     cee_status;
+       u32     flash_status;
 };
 
 /*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
 extern int bnad_enable_default_bcast(struct bnad *bnad);
 extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
 extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
 extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
                struct rtnl_link_stats64 *stats);
 
+/* Debugfs */
+void   bnad_debugfs_init(struct bnad *bnad);
+void   bnad_debugfs_uninit(struct bnad *bnad);
+
 /**
  * MACROS
  */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644 (file)
index 0000000..592ad39
--- /dev/null
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debufs interface
+ *
+ * To access the interface, debugfs file system should be mounted
+ * if not already mounted using:
+ *     mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ *     - bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging service available per pci_dev:
+ *     fwtrc:  To collect current firmware trace.
+ *     fwsave: To collect last saved fw trace as a result of firmware crash.
+ *     regwr:  To write one word to chip register
+ *     regrd:  To read one or more words from chip register.
+ */
+
+struct bnad_debug_info {
+       char *debug_buffer;
+       void *i_private;
+       int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *fw_debug;
+       unsigned long flags;
+       int rc;
+
+       fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!fw_debug)
+               return -ENOMEM;
+
+       fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+       fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+       if (!fw_debug->debug_buffer) {
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+                       fw_debug->debug_buffer,
+                       &fw_debug->buffer_len);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       if (rc != BFA_STATUS_OK) {
+               kfree(fw_debug->debug_buffer);
+               fw_debug->debug_buffer = NULL;
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bnad %s: Failed to collect fwtrc\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = fw_debug;
+
+       return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *fw_debug;
+       unsigned long flags;
+       int rc;
+
+       fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!fw_debug)
+               return -ENOMEM;
+
+       fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+       fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+       if (!fw_debug->debug_buffer) {
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+                       fw_debug->debug_buffer,
+                       &fw_debug->buffer_len);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+               kfree(fw_debug->debug_buffer);
+               fw_debug->debug_buffer = NULL;
+               kfree(fw_debug);
+               fw_debug = NULL;
+               pr_warn("bna %s: Failed to collect fwsave\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = fw_debug;
+
+       return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *reg_debug;
+
+       reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!reg_debug)
+               return -ENOMEM;
+
+       reg_debug->i_private = inode->i_private;
+
+       file->private_data = reg_debug;
+
+       return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+       struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+       struct bnad_iocmd_comp fcomp;
+       unsigned long flags = 0;
+       int ret = BFA_STATUS_FAILED;
+
+       /* Get IOC info */
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       /* Retrieve CEE related info */
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto out;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       drvinfo->cee_status = fcomp.comp_status;
+
+       /* Retrieve flash partition info */
+       fcomp.comp_status = 0;
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto out;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       drvinfo->flash_status = fcomp.comp_status;
+out:
+       return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+       struct bnad *bnad = inode->i_private;
+       struct bnad_debug_info *drv_info;
+       int rc;
+
+       drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+       if (!drv_info)
+               return -ENOMEM;
+
+       drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+       drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+       if (!drv_info->debug_buffer) {
+               kfree(drv_info);
+               drv_info = NULL;
+               pr_warn("bna %s: Failed to allocate drv info buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       mutex_lock(&bnad->conf_mutex);
+       rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+                               drv_info->buffer_len);
+       mutex_unlock(&bnad->conf_mutex);
+       if (rc != BFA_STATUS_OK) {
+               kfree(drv_info->debug_buffer);
+               drv_info->debug_buffer = NULL;
+               kfree(drv_info);
+               drv_info = NULL;
+               pr_warn("bna %s: Failed to collect drvinfo\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       file->private_data = drv_info;
+
+       return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+       loff_t pos = file->f_pos;
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return -EINVAL;
+
+       switch (orig) {
+       case 0:
+               file->f_pos = offset;
+               break;
+       case 1:
+               file->f_pos += offset;
+               break;
+       case 2:
+               file->f_pos = debug->buffer_len - offset;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+               file->f_pos = pos;
+               return -EINVAL;
+       }
+
+       return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+                 size_t nbytes, loff_t *pos)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug || !debug->debug_buffer)
+               return 0;
+
+       return simple_read_from_buffer(buf, nbytes, pos,
+                               debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ      (0x40000)
+#define BFA_REG_CB_ADDRSZ      (0x20000)
+#define BFA_REG_ADDRSZ(__ioc)  \
+       ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?  \
+        BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+       u8 area;
+
+       /* check [16:15] */
+       area = (offset >> 15) & 0x7;
+       if (area == 0) {
+               /* PCIe core register */
+               if ((offset + (len<<2)) > 0x8000)       /* 8k dwords or 32KB */
+                       return BFA_STATUS_EINVAL;
+       } else if (area == 0x1) {
+               /* CB 32 KB memory page */
+               if ((offset + (len<<2)) > 0x10000)      /* 8k dwords or 32KB */
+                       return BFA_STATUS_EINVAL;
+       } else {
+               /* CB register space 64KB */
+               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+                       return BFA_STATUS_EINVAL;
+       }
+       return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+                       size_t nbytes, loff_t *pos)
+{
+       struct bnad_debug_info *regrd_debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+       ssize_t rc;
+
+       if (!bnad->regdata)
+               return 0;
+
+       rc = simple_read_from_buffer(buf, nbytes, pos,
+                       bnad->regdata, bnad->reglen);
+
+       if ((*pos + nbytes) >= bnad->reglen) {
+               kfree(bnad->regdata);
+               bnad->regdata = NULL;
+               bnad->reglen = 0;
+       }
+
+       return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+               size_t nbytes, loff_t *ppos)
+{
+       struct bnad_debug_info *regrd_debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+       struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+       int addr, len, rc, i;
+       u32 *regbuf;
+       void __iomem *rb, *reg_addr;
+       unsigned long flags;
+       void *kern_buf;
+
+       /* Allocate memory to store the user space buf */
+       kern_buf = kzalloc(nbytes, GFP_KERNEL);
+       if (!kern_buf) {
+               pr_warn("bna %s: Failed to allocate user buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+               kfree(kern_buf);
+               return -ENOMEM;
+       }
+
+       rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+       if (rc < 2) {
+               pr_warn("bna %s: Failed to read user buffer\n",
+                       pci_name(bnad->pcidev));
+               kfree(kern_buf);
+               return -EINVAL;
+       }
+
+       kfree(kern_buf);
+       kfree(bnad->regdata);
+       bnad->regdata = NULL;
+       bnad->reglen = 0;
+
+       bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+       if (!bnad->regdata) {
+               pr_warn("bna %s: Failed to allocate regrd buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       bnad->reglen = len << 2;
+       rb = bfa_ioc_bar0(ioc);
+       addr &= BFA_REG_ADDRMSK(ioc);
+
+       /* offset and len sanity check */
+       rc = bna_reg_offset_check(ioc, addr, len);
+       if (rc) {
+               pr_warn("bna %s: Failed reg offset check\n",
+                       pci_name(bnad->pcidev));
+               kfree(bnad->regdata);
+               bnad->regdata = NULL;
+               bnad->reglen = 0;
+               return -EINVAL;
+       }
+
+       reg_addr = rb + addr;
+       regbuf =  (u32 *)bnad->regdata;
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       for (i = 0; i < len; i++) {
+               *regbuf = readl(reg_addr);
+               regbuf++;
+               reg_addr += sizeof(u32);
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+               size_t nbytes, loff_t *ppos)
+{
+       struct bnad_debug_info *debug = file->private_data;
+       struct bnad *bnad = (struct bnad *)debug->i_private;
+       struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+       int addr, val, rc;
+       void __iomem *reg_addr;
+       unsigned long flags;
+       void *kern_buf;
+
+       /* Allocate memory to store the user space buf */
+       kern_buf = kzalloc(nbytes, GFP_KERNEL);
+       if (!kern_buf) {
+               pr_warn("bna %s: Failed to allocate user buffer\n",
+                       pci_name(bnad->pcidev));
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+               kfree(kern_buf);
+               return -ENOMEM;
+       }
+
+       rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+       if (rc < 2) {
+               pr_warn("bna %s: Failed to read user buffer\n",
+                       pci_name(bnad->pcidev));
+               kfree(kern_buf);
+               return -EINVAL;
+       }
+       kfree(kern_buf);
+
+       addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+       /* offset and len sanity check */
+       rc = bna_reg_offset_check(ioc, addr, 1);
+       if (rc) {
+               pr_warn("bna %s: Failed reg offset check\n",
+                       pci_name(bnad->pcidev));
+               return -EINVAL;
+       }
+
+       reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       writel(val, reg_addr);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return 0;
+
+       file->private_data = NULL;
+       kfree(debug);
+       return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+       struct bnad_debug_info *debug = file->private_data;
+
+       if (!debug)
+               return 0;
+
+       kfree(debug->debug_buffer);
+
+       file->private_data = NULL;
+       kfree(debug);
+       debug = NULL;
+       return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_fwtrc,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_fwsave,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_reg,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read_regrd,
+       .write          =       bnad_debugfs_write_regrd,
+       .release        =       bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_reg,
+       .llseek         =       bnad_debugfs_lseek,
+       .write          =       bnad_debugfs_write_regwr,
+       .release        =       bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+       .owner          =       THIS_MODULE,
+       .open           =       bnad_debugfs_open_drvinfo,
+       .llseek         =       bnad_debugfs_lseek,
+       .read           =       bnad_debugfs_read,
+       .release        =       bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+       const char *name;
+       mode_t  mode;
+       const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+       { "fwtrc",  S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+       { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+       { "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+       { "regwr",  S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+       { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+       const struct bnad_debugfs_entry *file;
+       char name[64];
+       int i;
+
+       /* Setup the BNA debugfs root directory*/
+       if (!bna_debugfs_root) {
+               bna_debugfs_root = debugfs_create_dir("bna", NULL);
+               atomic_set(&bna_debugfs_port_count, 0);
+               if (!bna_debugfs_root) {
+                       pr_warn("BNA: debugfs root dir creation failed\n");
+                       return;
+               }
+       }
+
+       /* Setup the pci_dev debugfs directory for the port */
+       snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+       if (!bnad->port_debugfs_root) {
+               bnad->port_debugfs_root =
+                       debugfs_create_dir(name, bna_debugfs_root);
+               if (!bnad->port_debugfs_root) {
+                       pr_warn("bna pci_dev %s: root dir creation failed\n",
+                               pci_name(bnad->pcidev));
+                       return;
+               }
+
+               atomic_inc(&bna_debugfs_port_count);
+
+               for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+                       file = &bnad_debugfs_files[i];
+                       bnad->bnad_dentry_files[i] =
+                                       debugfs_create_file(file->name,
+                                                       file->mode,
+                                                       bnad->port_debugfs_root,
+                                                       bnad,
+                                                       file->fops);
+                       if (!bnad->bnad_dentry_files[i]) {
+                               pr_warn(
+                                    "BNA pci_dev:%s: create %s entry failed\n",
+                                    pci_name(bnad->pcidev), file->name);
+                               return;
+                       }
+               }
+       }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+               if (bnad->bnad_dentry_files[i]) {
+                       debugfs_remove(bnad->bnad_dentry_files[i]);
+                       bnad->bnad_dentry_files[i] = NULL;
+               }
+       }
+
+       /* Remove the pci_dev debugfs directory for the port */
+       if (bnad->port_debugfs_root) {
+               debugfs_remove(bnad->port_debugfs_root);
+               bnad->port_debugfs_root = NULL;
+               atomic_dec(&bna_debugfs_port_count);
+       }
+
+       /* Remove the BNA debugfs root directory */
+       if (atomic_read(&bna_debugfs_port_count) == 0) {
+               debugfs_remove(bna_debugfs_root);
+               bna_debugfs_root = NULL;
+       }
+}
index fd3dcc1e91453d24db9c8a0b9a6779c8046536f3..5f7be5ac32a1e0868aee6ee6ba2397c49b22df3f 100644 (file)
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        struct bfa_ioc_attr *ioc_attr;
        unsigned long flags;
 
-       strcpy(drvinfo->driver, BNAD_NAME);
-       strcpy(drvinfo->version, BNAD_VERSION);
+       strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
 
        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
                bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-               strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
-                       sizeof(drvinfo->fw_version) - 1);
+               strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+                       sizeof(drvinfo->fw_version));
                kfree(ioc_attr);
        }
 
-       strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+               sizeof(drvinfo->bus_info));
 }
 
 static void
@@ -934,6 +935,143 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+                               u32 *base_offset)
+{
+       struct bfa_flash_attr *flash_attr;
+       struct bnad_iocmd_comp fcomp;
+       u32 i, flash_part = 0, ret;
+       unsigned long flags = 0;
+
+       flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+       if (!flash_attr)
+               return -ENOMEM;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               kfree(flash_attr);
+               goto out_err;
+       }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+
+       /* Check for the flash type & base offset value */
+       if (ret == BFA_STATUS_OK) {
+               for (i = 0; i < flash_attr->npart; i++) {
+                       if (offset >= flash_attr->part[i].part_off &&
+                           offset < (flash_attr->part[i].part_off +
+                                     flash_attr->part[i].part_size)) {
+                               flash_part = flash_attr->part[i].part_type;
+                               *base_offset = flash_attr->part[i].part_off;
+                               break;
+                       }
+               }
+       }
+       kfree(flash_attr);
+       return flash_part;
+out_err:
+       return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+       return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+               u8 *bytes)
+{
+       struct bnad *bnad = netdev_priv(netdev);
+       struct bnad_iocmd_comp fcomp;
+       u32 flash_part = 0, base_offset = 0;
+       unsigned long flags = 0;
+       int ret = 0;
+
+       /* Check if the flash read request is valid */
+       if (eeprom->magic != (bnad->pcidev->vendor |
+                            (bnad->pcidev->device << 16)))
+               return -EFAULT;
+
+       /* Query the flash partition based on the offset */
+       flash_part = bnad_get_flash_partition_by_offset(bnad,
+                               eeprom->offset, &base_offset);
+       if (flash_part <= 0)
+               return -EFAULT;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+                               bnad->id, bytes, eeprom->len,
+                               eeprom->offset - base_offset,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+done:
+       return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+               u8 *bytes)
+{
+       struct bnad *bnad = netdev_priv(netdev);
+       struct bnad_iocmd_comp fcomp;
+       u32 flash_part = 0, base_offset = 0;
+       unsigned long flags = 0;
+       int ret = 0;
+
+       /* Check if the flash update request is valid */
+       if (eeprom->magic != (bnad->pcidev->vendor |
+                            (bnad->pcidev->device << 16)))
+               return -EINVAL;
+
+       /* Query the flash partition based on the offset */
+       flash_part = bnad_get_flash_partition_by_offset(bnad,
+                               eeprom->offset, &base_offset);
+       if (flash_part <= 0)
+               return -EFAULT;
+
+       fcomp.bnad = bnad;
+       fcomp.comp_status = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+                               bnad->id, bytes, eeprom->len,
+                               eeprom->offset - base_offset,
+                               bnad_cb_completion, &fcomp);
+       if (ret != BFA_STATUS_OK) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       wait_for_completion(&fcomp.comp);
+       ret = fcomp.comp_status;
+done:
+       return ret;
+}
+
 static struct ethtool_ops bnad_ethtool_ops = {
        .get_settings = bnad_get_settings,
        .set_settings = bnad_set_settings,
@@ -948,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
        .set_pauseparam = bnad_set_pauseparam,
        .get_strings = bnad_get_strings,
        .get_ethtool_stats = bnad_get_ethtool_stats,
-       .get_sset_count = bnad_get_sset_count
+       .get_sset_count = bnad_get_sset_count,
+       .get_eeprom_len = bnad_get_eeprom_len,
+       .get_eeprom = bnad_get_eeprom,
+       .set_eeprom = bnad_set_eeprom,
 };
 
 void
index 1b3e90dfbd9a40d4cb9c0614411e83f1605b2d7b..32e8f178ab76a964af67dcce189e8ec84195ab42 100644 (file)
@@ -43,8 +43,7 @@ extern char bfa_version[];
 
 #pragma pack(1)
 
-#define MAC_ADDRLEN    (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
 
 #pragma pack()
 
index 98849a1fc749995070dadd607175f55ef89d2fa0..b48378a41e492ce3df245214235c25b901a436c1 100644 (file)
@@ -7,6 +7,7 @@ config HAVE_NET_MACB
 
 config NET_ATMEL
        bool "Atmel devices"
+       default y
        depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644 (file)
index 0000000..aba435c
--- /dev/null
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+       tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+       depends on HAS_IOMEM
+       select CRC32
+       help
+         This is the driver for the XGMAC Ethernet IP block found on Calxeda
+         Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644 (file)
index 0000000..f0ef080
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644 (file)
index 0000000..107c1b0
--- /dev/null
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL          0x00000000      /* MAC Configuration */
+#define XGMAC_FRAME_FILTER     0x00000004      /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL                0x00000018      /* MAC Flow Control */
+#define XGMAC_VLAN_TAG         0x0000001C      /* VLAN Tags */
+#define XGMAC_VERSION          0x00000020      /* Version */
+#define XGMAC_VLAN_INCL                0x00000024      /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL         0x00000028      /* LPI Control and Status */
+#define XGMAC_LPI_TIMER                0x0000002C      /* LPI Timers Control */
+#define XGMAC_TX_PACE          0x00000030      /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH                0x00000034      /* VLAN Hash Table */
+#define XGMAC_DEBUG            0x00000038      /* Debug */
+#define XGMAC_INT_STAT         0x0000003C      /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg)   (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg)    (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n)          (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH         16
+#define XGMAC_OMR              0x00000400
+#define XGMAC_REMOTE_WAKE      0x00000700      /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT              0x00000704      /* PMT Control and Status */
+#define XGMAC_MMC_CTRL         0x00000800      /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX      0x00000804      /* Recieve Interrupt */
+#define XGMAC_MMC_INTR_TX      0x00000808      /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Recieve Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810      /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO        0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI        0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO        0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI        0x00000820
+#define XGMAC_MMC_TXBCFRAME_G  0x00000824
+#define XGMAC_MMC_TXMCFRAME_G  0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW  0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME  0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO        0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI        0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO        0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI        0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G  0x00000918
+#define XGMAC_MMC_RXMCFRAME_G  0x00000920
+#define XGMAC_MMC_RXCRCERR     0x00000928
+#define XGMAC_MMC_RXRUNT       0x00000930
+#define XGMAC_MMC_RXJABBER     0x00000934
+#define XGMAC_MMC_RXUCFRAME_G  0x00000970
+#define XGMAC_MMC_RXLENGTHERR  0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW   0x00000990
+#define XGMAC_MMC_RXVLANFRAME  0x00000998
+#define XGMAC_MMC_RXWATCHDOG   0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE     0x00000f00      /* Bus Mode */
+#define XGMAC_DMA_TX_POLL      0x00000f04      /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL      0x00000f08      /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c      /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10      /* Transmit List Base */
+#define XGMAC_DMA_STATUS       0x00000f14      /* Status Register */
+#define XGMAC_DMA_CONTROL      0x00000f18      /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA     0x00000f1c      /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20    /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER        0x00000f24      /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS      0x00000f28      /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS   0x00000f2C      /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE   0x00000f58      /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE          0x80000000
+#define XGMAC_MAX_FILTER_ADDR  31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET        0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM        0x00000040
+#define XGMAC_PMT_MAGIC_PKT    0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN        0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN    0x00000001
+
+#define XGMAC_CONTROL_SPD      0x40000000      /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G   0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G  0x00000000
+#define XGMAC_CONTROL_SARC     0x10000000      /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK        0x18000000
+#define XGMAC_CONTROL_CAR      0x04000000      /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP       0x01000000      /* Disable Padding */
+#define XGMAC_CONTROL_WD       0x00800000      /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD       0x00400000      /* Jabber disable */
+#define XGMAC_CONTROL_JE       0x00100000      /* Jumbo frame */
+#define XGMAC_CONTROL_LM       0x00001000      /* Loop-back mode */
+#define XGMAC_CONTROL_IPC      0x00000400      /* Checksum Offload */
+#define XGMAC_CONTROL_ACS      0x00000080      /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC     0x00000010      /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE       0x00000008      /* Transmitter Enable */
+#define XGMAC_CONTROL_RE       0x00000004      /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR  0x00000001      /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002      /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004      /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF        0x00000008      /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM  0x00000010      /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020      /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF        0x00000100      /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200      /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400      /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800      /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000      /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA  0x80000000      /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK        0xffff0000      /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT       16
+#define XGMAC_FLOW_CTRL_DZQP   0x00000080      /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshhold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030    /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP     0x00000008      /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE    0x00000004      /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE    0x00000002      /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA        0x00000001      /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT     0x0080          /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI     0x0040          /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001      /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK  0x0000007c      /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2               /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS      0x00000080      /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK  0x00003f00      /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB                0x00010000      /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000      /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT        17
+#define DMA_BUS_MODE_USP       0x00800000
+#define DMA_BUS_MODE_8PBL      0x01000000
+#define DMA_BUS_MODE_AAL       0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK  0x0000c000      /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB             0x00010000      /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST         0x00002000      /* Start/Stop Transmission */
+#define DMA_CONTROL_SR         0x00000002      /* Start/Stop Receive */
+#define DMA_CONTROL_DFF                0x01000000      /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE       0x00010000      /* Normal Summary */
+#define DMA_INTR_ENA_AIE       0x00008000      /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE       0x00004000      /* Early Receive */
+#define DMA_INTR_ENA_FBE       0x00002000      /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE       0x00000400      /* Early Transmit */
+#define DMA_INTR_ENA_RWE       0x00000200      /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE       0x00000100      /* Receive Stopped */
+#define DMA_INTR_ENA_RUE       0x00000080      /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE       0x00000040      /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE       0x00000020      /* Tx Underflow */
+#define DMA_INTR_ENA_OVE       0x00000010      /* Receive Overflow */
+#define DMA_INTR_ENA_TJE       0x00000008      /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE       0x00000004      /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE       0x00000002      /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE       0x00000001      /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL                (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+                                DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL      (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+                                DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+                                DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+                                DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+                                DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK  (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI         0x08000000      /* MMC interrupt */
+#define DMA_STATUS_GLI         0x04000000      /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK     0x00380000      /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000      /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000      /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK     0x00700000      /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT    20
+#define DMA_STATUS_RS_MASK     0x000e0000      /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT    17
+#define DMA_STATUS_NIS         0x00010000      /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS         0x00008000      /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI         0x00004000      /* Early Receive Interrupt */
+#define DMA_STATUS_FBI         0x00002000      /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI         0x00000400      /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT         0x00000200      /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS         0x00000100      /* Receive Process Stopped */
+#define DMA_STATUS_RU          0x00000080      /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI          0x00000040      /* Receive Interrupt */
+#define DMA_STATUS_UNF         0x00000020      /* Transmit Underflow */
+#define DMA_STATUS_OVF         0x00000010      /* Receive Overflow */
+#define DMA_STATUS_TJT         0x00000008      /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU          0x00000004      /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS         0x00000002      /* Transmit Process Stopped */
+#define DMA_STATUS_TI          0x00000001      /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX          0x00000008      /* Transmitter Enable */
+#define MAC_ENABLE_RX          0x00000004      /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF          0x00200000      /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF          0x00100000      /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshhold Ctrl */
+#define XGMAC_OMR_TTC_MASK     0x00030000
+#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshhold */
+#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshhold MASK */
+#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshhold */
+#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshhold MASK */
+#define XGMAC_OMR_EFC          0x00000100      /* Enable Hardware FC */
+#define XGMAC_OMR_FEF          0x00000080      /* Forward Error Frames */
+#define XGMAC_OMR_DT           0x00000040      /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF          0x00000020      /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC          0x00000010      /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshhold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL   0x00010000      /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ                (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS      0x00000001
+#define RXDESC_CRC_ERR         0x00000002
+#define RXDESC_RX_ERR          0x00000008
+#define RXDESC_RX_WDOG         0x00000010
+#define RXDESC_FRAME_TYPE      0x00000020
+#define RXDESC_GIANT_FRAME     0x00000080
+#define RXDESC_LAST_SEG                0x00000100
+#define RXDESC_FIRST_SEG       0x00000200
+#define RXDESC_VLAN_FRAME      0x00000400
+#define RXDESC_OVERFLOW_ERR    0x00000800
+#define RXDESC_LENGTH_ERR      0x00001000
+#define RXDESC_SA_FILTER_FAIL  0x00002000
+#define RXDESC_DESCRIPTOR_ERR  0x00004000
+#define RXDESC_ERROR_SUMMARY   0x00008000
+#define RXDESC_FRAME_LEN_OFFSET        16
+#define RXDESC_FRAME_LEN_MASK  0x3fff0000
+#define RXDESC_DA_FILTER_FAIL  0x40000000
+
+#define RXDESC1_END_RING       0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP  0x00000001
+#define RXDESC_IP_PAYLOAD_TCP  0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR   0x00000008
+#define RXDESC_IP_PAYLOAD_ERR  0x00000010
+#define RXDESC_IPV4_PACKET     0x00000040
+#define RXDESC_IPV6_PACKET     0x00000080
+#define TXDESC_UNDERFLOW_ERR   0x00000001
+#define TXDESC_JABBER_TIMEOUT  0x00000002
+#define TXDESC_LOCAL_FAULT     0x00000004
+#define TXDESC_REMOTE_FAULT    0x00000008
+#define TXDESC_VLAN_FRAME      0x00000010
+#define TXDESC_FRAME_FLUSHED   0x00000020
+#define TXDESC_IP_HEADER_ERR   0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR        0x00000080
+#define TXDESC_ERROR_SUMMARY   0x00008000
+#define TXDESC_SA_CTRL_INSERT  0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED        0x00100000
+#define TXDESC_END_RING                0x00200000
+#define TXDESC_CSUM_IP         0x00400000
+#define TXDESC_CSUM_IP_PAYLD   0x00800000
+#define TXDESC_CSUM_ALL                0x00C00000
+#define TXDESC_CRC_EN_REPLACE  0x01000000
+#define TXDESC_CRC_EN_APPEND   0x02000000
+#define TXDESC_DISABLE_PAD     0x04000000
+#define TXDESC_FIRST_SEG       0x10000000
+#define TXDESC_LAST_SEG                0x20000000
+#define TXDESC_INTERRUPT       0x40000000
+
+#define DESC_OWN               0x80000000
+#define DESC_BUFFER1_SZ_MASK   0x00001fff
+#define DESC_BUFFER2_SZ_MASK   0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
+struct xgmac_dma_desc {
+       __le32 flags;
+       __le32 buf_size;
+       __le32 buf1_addr;               /* Buffer 1 Address Pointer */
+       __le32 buf2_addr;               /* Buffer 2 Address Pointer */
+       __le32 ext_status;
+       __le32 res[3];
+};
+
/* Driver-maintained error counters, beyond the standard netdev stats */
struct xgmac_extra_stats {
	/* Transmit errors (from Tx descriptor status) */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors (from Rx descriptor status) */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_sa_filter_fail;
	unsigned long rx_payload_error;
	unsigned long rx_ip_header_error;
	/* Tx/Rx IRQ errors */
	/* NOTE(review): "tx_undeflow" looks like a typo for "tx_underflow";
	 * renaming would change any ethtool stat string derived from it,
	 * so it is left as-is. */
	unsigned long tx_undeflow;
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};
+
/* Per-device driver state */
struct xgmac_priv {
	/* Rx ring: head is where new buffers are refilled, tail is the
	 * next descriptor the DMA will complete */
	struct xgmac_dma_desc *dma_rx;
	struct sk_buff **rx_skbuff;	/* skb per Rx descriptor slot */
	unsigned int rx_tail;
	unsigned int rx_head;

	/* Tx ring: head is the next free slot, tail the next to reclaim */
	struct xgmac_dma_desc *dma_tx;
	struct sk_buff **tx_skbuff;	/* skb stored at a frame's head slot */
	unsigned int tx_head;
	unsigned int tx_tail;

	void __iomem *base;		/* Mapped XGMAC register block */
	struct sk_buff_head rx_recycle;	/* Pool of reusable Rx skbs */
	unsigned int dma_buf_sz;	/* Size each Rx buffer is mapped with */
	dma_addr_t dma_rx_phy;		/* Bus address of the Rx ring */
	dma_addr_t dma_tx_phy;		/* Bus address of the Tx ring */

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;			/* Power-management (wake) interrupt */
	char rx_pause;			/* Rx flow control enabled */
	char tx_pause;			/* Tx flow control enabled */
	int wolopts;			/* Wake-on-LAN options */
};
+
/* XGMAC Configuration Settings */
#define MAX_MTU                        9000
#define PAUSE_TIME             0x400

/* Ring sizes must be powers of two: dma_ring_incr() wraps with a mask */
#define DMA_RX_RING_SZ         256
#define DMA_TX_RING_SZ         128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH              (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers (h = head/producer, t = tail/consumer) */
#define dma_ring_incr(n, s)    (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s)        CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)  CIRC_CNT(h, t, s)
+
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+       if (buf_sz > MAX_DESC_BUF_SZ)
+               p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+                       (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+       else
+               p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+       u32 len = cpu_to_le32(p->flags);
+       return (len & DESC_BUFFER1_SZ_MASK) +
+               ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+                                    int buf_sz)
+{
+       struct xgmac_dma_desc *end = p + ring_size - 1;
+
+       memset(p, 0, sizeof(*p) * ring_size);
+
+       for (; p <= end; p++)
+               desc_set_buf_len(p, buf_sz);
+
+       end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+       memset(p, 0, sizeof(*p) * ring_size);
+       p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+       return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
/*
 * Hand an Rx descriptor back to the DMA engine.  Callers (see
 * xgmac_rx_refill()) issue a wmb() first so the descriptor contents are
 * visible before DESC_OWN is.
 */
static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+       u32 tmpflags = le32_to_cpu(p->flags);
+       tmpflags &= TXDESC_END_RING;
+       tmpflags |= flags | DESC_OWN;
+       p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+       return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
/* Return the buffer 1 DMA address programmed into the descriptor */
static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}
+
/*
 * Program the descriptor's buffer address(es).  A buffer longer than
 * MAX_DESC_BUF_SZ is split across the two per-descriptor buffers, with
 * buffer 2 continuing at paddr + MAX_DESC_BUF_SZ (the matching lengths
 * are written by desc_set_buf_len()).
 */
static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}
+
/* Program both the buffer length fields and address(es) for @len bytes */
static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}
+
/*
 * Extract the received frame length from the Rx status word.
 * NOTE(review): when RXDESC_FRAME_TYPE is set the hardware count
 * apparently includes the FCS, hence the ETH_FCS_LEN subtraction —
 * confirm against the XGMAC databook (bit defined earlier in this file).
 */
static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}
+
/*
 * Flush the transmit FIFO: set OMR.FTF and busy-wait (bounded, up to
 * ~1ms) for the hardware to self-clear the bit when the flush finishes.
 */
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}
+
/*
 * Inspect a completed Tx descriptor's status word and account any
 * errors in the extra stats.  Returns 0 if the frame transmitted
 * cleanly, -1 if the error summary bit was set.  An underflow error
 * additionally triggers a Tx FIFO flush.
 */
static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}
+
/*
 * Inspect a completed Rx descriptor's status words.
 *
 * Returns:
 *   -1                    - frame must be dropped (filter fail or a
 *                           fatal descriptor/CRC/length error)
 *   CHECKSUM_UNNECESSARY  - hardware verified the checksum
 *   CHECKSUM_NONE         - stack must verify the checksum itself
 * The return value is stored directly into skb->ip_summed by the caller.
 */
static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* Check if packet has checksum already */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Handle any errors */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	/* IP header/payload checksum failures: count them but still pass
	 * the frame up, letting the stack re-verify */
	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}
+
/* Enable the MAC Rx/Tx paths, then start the DMA engines */
static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}
+
/* Stop the DMA engines first, then disable the MAC Rx/Tx paths
 * (reverse order of xgmac_mac_enable()) */
static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}
+
/*
 * Write a MAC address into hardware filter slot @num.  Bytes 4-5 go in
 * the high register (with the address-enable bit for every slot except
 * slot 0), bytes 0-3 in the low register.
 */
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                              int num)
+{
+       u32 hi_addr, lo_addr;
+
+       /* Read the MAC address from the hardware */
+       hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+       lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+       /* Extract the MAC address from the high and low words */
+       addr[0] = lo_addr & 0xff;
+       addr[1] = (lo_addr >> 8) & 0xff;
+       addr[2] = (lo_addr >> 16) & 0xff;
+       addr[3] = (lo_addr >> 24) & 0xff;
+       addr[4] = hi_addr & 0xff;
+       addr[5] = (hi_addr >> 8) & 0xff;
+}
+
/*
 * Configure pause-frame flow control.
 * @rx: honour received pause frames  @tx: generate pause frames
 *
 * When either direction is enabled the pause time and priority bits are
 * programmed and OMR.EFC (enable flow control) is set; otherwise both
 * the flow control register and OMR.EFC are cleared.  The requested
 * state is cached in priv for re-application on reopen.  Always
 * returns 0.
 */
static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}
+
/*
 * Replenish the Rx ring: for every free slot between head and tail,
 * attach a fresh (or recycled) skb, DMA-map it, and hand the
 * descriptor back to the hardware.
 */
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		/* NOTE(review): if this slot were ever still occupied,
		 * `continue` re-tests the same entry without advancing
		 * rx_head and would spin forever.  In practice rx_head
		 * only moves over slots the refill itself filled, so the
		 * slot should always be NULL here — worth confirming. */
		if (priv->rx_skbuff[entry] != NULL)
			continue;

		/* Prefer an skb from the recycle pool over a fresh alloc */
		skb = __skb_dequeue(&priv->rx_recycle);
		if (skb == NULL)
			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
		if (unlikely(skb == NULL))
			break;

		priv->rx_skbuff[entry] = skb;
		/* NOTE(review): dma_map_single() result is not checked
		 * with dma_mapping_error() — TODO confirm/fix. */
		paddr = dma_map_single(priv->device, skb->data,
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		/* Ensure descriptor is in memory before handing to h/w */
		wmb();
		desc_set_rx_owner(p);
	}
}
+
+/**
+ * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description:  this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       unsigned int bfsize;
+
+       /* Set the Buffer size according to the MTU;
+        * indeed, in case of jumbo we need to bump-up the buffer sizes.
+        */
+       bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+                      64);
+
+       netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+       priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+                                 GFP_KERNEL);
+       if (!priv->rx_skbuff)
+               return -ENOMEM;
+
+       priv->dma_rx = dma_alloc_coherent(priv->device,
+                                         DMA_RX_RING_SZ *
+                                         sizeof(struct xgmac_dma_desc),
+                                         &priv->dma_rx_phy,
+                                         GFP_KERNEL);
+       if (!priv->dma_rx)
+               goto err_dma_rx;
+
+       priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+                                 GFP_KERNEL);
+       if (!priv->tx_skbuff)
+               goto err_tx_skb;
+
+       priv->dma_tx = dma_alloc_coherent(priv->device,
+                                         DMA_TX_RING_SZ *
+                                         sizeof(struct xgmac_dma_desc),
+                                         &priv->dma_tx_phy,
+                                         GFP_KERNEL);
+       if (!priv->dma_tx)
+               goto err_dma_tx;
+
+       netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+           "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+           priv->dma_rx, priv->dma_tx,
+           (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+       priv->rx_tail = 0;
+       priv->rx_head = 0;
+       priv->dma_buf_sz = bfsize;
+       desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+       xgmac_rx_refill(priv);
+
+       priv->tx_tail = 0;
+       priv->tx_head = 0;
+       desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+       writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+       writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+       return 0;
+
+err_dma_tx:
+       kfree(priv->tx_skbuff);
+err_tx_skb:
+       dma_free_coherent(priv->device,
+                         DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+                         priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+       kfree(priv->rx_skbuff);
+       return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+       int i;
+       struct xgmac_dma_desc *p;
+
+       if (!priv->rx_skbuff)
+               return;
+
+       for (i = 0; i < DMA_RX_RING_SZ; i++) {
+               if (priv->rx_skbuff[i] == NULL)
+                       continue;
+
+               p = priv->dma_rx + i;
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                priv->dma_buf_sz, DMA_FROM_DEVICE);
+               dev_kfree_skb_any(priv->rx_skbuff[i]);
+               priv->rx_skbuff[i] = NULL;
+       }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+       int i, f;
+       struct xgmac_dma_desc *p;
+
+       if (!priv->tx_skbuff)
+               return;
+
+       for (i = 0; i < DMA_TX_RING_SZ; i++) {
+               if (priv->tx_skbuff[i] == NULL)
+                       continue;
+
+               p = priv->dma_tx + i;
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                desc_get_buf_len(p), DMA_TO_DEVICE);
+
+               for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+                       p = priv->dma_tx + i++;
+                       dma_unmap_page(priv->device, desc_get_buf_addr(p),
+                                      desc_get_buf_len(p), DMA_TO_DEVICE);
+               }
+
+               dev_kfree_skb_any(priv->tx_skbuff[i]);
+               priv->tx_skbuff[i] = NULL;
+       }
+}
+
/* Tear down both descriptor rings: free queued skbs, then the coherent
 * ring memory, then the skb pointer arrays (NULLing all pointers so a
 * second call is harmless). */
static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	xgmac_free_rx_skbufs(priv);
	xgmac_free_tx_skbufs(priv);

	/* Free the consistent memory allocated for descriptor rings */
	if (priv->dma_tx) {
		dma_free_coherent(priv->device,
				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		priv->dma_tx = NULL;
	}
	if (priv->dma_rx) {
		dma_free_coherent(priv->device,
				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
		priv->dma_rx = NULL;
	}
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;
	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;
}
+
/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 * (Kernel-doc previously named this "xgmac_tx".)
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	int i;
	void __iomem *ioaddr = priv->base;

	/* Acknowledge the Tx-done interrupt sources */
	writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);

	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (desc_get_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		if (desc_get_tx_ls(p))
			desc_get_tx_status(priv, p);

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			priv->tx_head, priv->tx_tail);

		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 desc_get_buf_len(p), DMA_TO_DEVICE);

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);

		/* Fragment descriptors carry no skb pointer of their own;
		 * only a frame's head slot does */
		if (!skb) {
			continue;
		}

		/* Reclaim the fragment descriptors that follow the head */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
							      DMA_TX_RING_SZ);
			p = priv->dma_tx + priv->tx_tail;

			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);
		}

		/*
		 * If there's room in the queue (limit it to size)
		 * we add this skb back into the pool,
		 * if it's the right size.
		 */
		if ((skb_queue_len(&priv->rx_recycle) <
			DMA_RX_RING_SZ) &&
			skb_recycle_check(skb, priv->dma_buf_sz))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb(skb);
	}

	/* Restart the queue once enough descriptors are free */
	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
	    TX_THRESH)
		netif_wake_queue(priv->dev);
}
+
/**
 * xgmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void xgmac_tx_err(struct xgmac_priv *priv)
{
	u32 reg, value, inten;

	netif_stop_queue(priv->dev);

	/* Mask DMA interrupts while the ring is rebuilt */
	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	/* Stop the Tx DMA and wait for it to become idle.
	 * NOTE(review): 0x700000 masks DMA status bits 22:20 — presumably
	 * the Tx process state field, with 0x600000 meaning suspended;
	 * confirm against the XGMAC databook and consider named macros. */
	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	do {
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	/* Drop everything queued and reset the ring to empty */
	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	/* Clear stale status, restore interrupts and resume the queue */
	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
		priv->base + XGMAC_DMA_STATUS);
	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);

	netif_wake_queue(priv->dev);
}
+
/*
 * Reset and configure the XGMAC core and its DMA engine.
 * Returns 0 on success, -EBUSY if the soft reset never completes.
 * The speed-selection bits of the control register are preserved
 * across the reset.
 */
static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Save the ctrl register value */
	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	/* SW reset */
	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	/* Burst lengths of 16 for both Tx and Rx, fixed burst, enhanced
	 * (alternate) descriptor size, address-aligned beats */
	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	/* XGMAC requires AXI bus init. This is a 'magic number' for now.
	 * NOTE(review): the constant has only 7 hex digits — numerically
	 * identical to 0x0000100E, but confirm the intended value. */
	writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	value = DMA_CONTROL_DFF;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	/* Set the HW DMA mode and the COE */
	writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
		ioaddr + XGMAC_OMR);

	/* Reset the MMC counters */
	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}
+
/**
 *  xgmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid.  If its not, refuse
	 * to bring the device up. The user must specify an
	 * address using the following linux command:
	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Fall back to a random address rather than failing */
		random_ether_addr(dev->dev_addr);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	skb_queue_head_init(&priv->rx_recycle);
	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	/* Re-apply the flow-control state cached in priv */
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;
}
+
/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 *  (Kernel-doc previously named this "xgmac_release".)
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Only disable NAPI if interrupts were actually enabled, i.e.
	 * napi_enable() was reached in xgmac_open() */
	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	skb_queue_purge(&priv->rx_recycle);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}
+
+/**
+ *  xgmac_xmit:
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       unsigned int entry;
+       int i;
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       struct xgmac_dma_desc *desc, *first;
+       unsigned int desc_flags;
+       unsigned int len;
+       dma_addr_t paddr;
+
+       if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+           (nfrags + 1)) {
+               writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+                       priv->base + XGMAC_DMA_INTR_ENA);
+               netif_stop_queue(dev);
+               return NETDEV_TX_BUSY;
+       }
+
+       desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+               TXDESC_CSUM_ALL : 0;
+       entry = priv->tx_head;
+       desc = priv->dma_tx + entry;
+       first = desc;
+
+       len = skb_headlen(skb);
+       paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->device, paddr)) {
+               dev_kfree_skb(skb);
+               return -EIO;
+       }
+       priv->tx_skbuff[entry] = skb;
+       desc_set_buf_addr_and_size(desc, paddr, len);
+
+       for (i = 0; i < nfrags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               len = frag->size;
+
+               paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+                                        DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->device, paddr)) {
+                       dev_kfree_skb(skb);
+                       return -EIO;
+               }
+
+               entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+               desc = priv->dma_tx + entry;
+               priv->tx_skbuff[entry] = NULL;
+
+               desc_set_buf_addr_and_size(desc, paddr, len);
+               if (i < (nfrags - 1))
+                       desc_set_tx_owner(desc, desc_flags);
+       }
+
+       /* Interrupt on completition only for the latest segment */
+       if (desc != first)
+               desc_set_tx_owner(desc, desc_flags |
+                       TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+       else
+               desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+       /* Set owner on first desc last to avoid race condition */
+       wmb();
+       desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+       priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+       writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+       return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+       unsigned int entry;
+       unsigned int count = 0;
+       struct xgmac_dma_desc *p;
+
+       while (count < limit) {
+               int ip_checksum;
+               struct sk_buff *skb;
+               int frame_len;
+
+               writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+                      priv->base + XGMAC_DMA_STATUS);
+
+               entry = priv->rx_tail;
+               p = priv->dma_rx + entry;
+               if (desc_get_owner(p))
+                       break;
+
+               count++;
+               priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+               /* read the status of the incoming frame */
+               ip_checksum = desc_get_rx_status(priv, p);
+               if (ip_checksum < 0)
+                       continue;
+
+               skb = priv->rx_skbuff[entry];
+               if (unlikely(!skb)) {
+                       netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+                       break;
+               }
+               priv->rx_skbuff[entry] = NULL;
+
+               frame_len = desc_get_rx_frame_len(p);
+               netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+                       frame_len, ip_checksum);
+
+               skb_put(skb, frame_len);
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                frame_len, DMA_FROM_DEVICE);
+
+               skb->protocol = eth_type_trans(skb, priv->dev);
+               skb->ip_summed = ip_checksum;
+               if (ip_checksum == CHECKSUM_NONE)
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&priv->napi, skb);
+       }
+
+       xgmac_rx_refill(priv);
+
+       writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+       return count;
+}
+
/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *           all interfaces.
 *  Description :
 *   This function implements the reception process.
 *   Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
				       struct xgmac_priv, napi);
	int work_done = 0;

	/* Reclaim finished Tx descriptors first, then receive */
	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	/* Budget not exhausted: leave polling mode and re-arm interrupts */
	if (work_done < budget) {
		napi_complete(napi);
		writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}
+
/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable tmrate. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	xgmac_tx_err(priv);
}
+
/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Unicast and multicast addresses are placed into the perfect-filter
 *  address registers while they fit; once XGMAC_MAX_FILTER_ADDR slots
 *  would be exceeded, the corresponding class falls back to hash
 *  filtering.  Promiscuous mode bypasses all of this.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;	/* slot 0 holds the device's own address */
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		 netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
		return;
	}

	memset(hash_filter, 0, sizeof(hash_filter));

	/* Too many unicast addresses for perfect filtering: hash them */
	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	/* Multicast shares the remaining perfect-filter slots with the
	 * unicast addresses placed above */
	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	/* NOTE(review): perfect-filter slots beyond `reg` are not cleared
	 * here — stale addresses from a previous call may remain active;
	 * verify whether the hardware slots need explicit invalidation. */
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}
+
+/**
+ *  xgmac_change_mtu - entry point to change MTU size for the device.
+ *  @dev : device pointer.
+ *  @new_mtu : the new MTU size for the device.
+ *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ *  to drive packet transmission. Ethernet has an MTU of 1500 octets
+ *  (ETH_DATA_LEN). This value can be changed with ifconfig.
+ *  Return value:
+ *  0 on success and an appropriate (-)ve integer as defined in errno.h
+ *  file on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       int old_mtu;
+
+       if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+               netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+               return -EINVAL;
+       }
+
+       old_mtu = dev->mtu;
+       dev->mtu = new_mtu;
+
+       /* return early if the buffer sizes will not change */
+       if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+               return 0;
+       if (old_mtu == new_mtu)
+               return 0;
+
+       /* Stop everything, get ready to change the MTU */
+       if (!netif_running(dev))
+               return 0;
+
+       /* Bring the interface down and then back up */
+       xgmac_stop(dev);
+       return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+       u32 intr_status;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+
+       intr_status = readl(ioaddr + XGMAC_INT_STAT);
+       if (intr_status & XGMAC_INT_STAT_PMT) {
+               netdev_dbg(priv->dev, "received Magic frame\n");
+               /* clear the PMT bits 5 and 6 by reading the PMT */
+               readl(ioaddr + XGMAC_PMT);
+       }
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+       u32 intr_status;
+       bool tx_err = false;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       struct xgmac_extra_stats *x = &priv->xstats;
+
+       /* read the status register (CSR5) */
+       intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+       intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+       writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+       /* It displays the DMA process states (CSR5 register) */
+       /* ABNORMAL interrupts */
+       if (unlikely(intr_status & DMA_STATUS_AIS)) {
+               if (intr_status & DMA_STATUS_TJT) {
+                       netdev_err(priv->dev, "transmit jabber\n");
+                       x->tx_jabber++;
+               }
+               if (intr_status & DMA_STATUS_RU)
+                       x->rx_buf_unav++;
+               if (intr_status & DMA_STATUS_RPS) {
+                       netdev_err(priv->dev, "receive process stopped\n");
+                       x->rx_process_stopped++;
+               }
+               if (intr_status & DMA_STATUS_ETI) {
+                       netdev_err(priv->dev, "transmit early interrupt\n");
+                       x->tx_early++;
+               }
+               if (intr_status & DMA_STATUS_TPS) {
+                       netdev_err(priv->dev, "transmit process stopped\n");
+                       x->tx_process_stopped++;
+                       tx_err = true;
+               }
+               if (intr_status & DMA_STATUS_FBI) {
+                       netdev_err(priv->dev, "fatal bus error\n");
+                       x->fatal_bus_error++;
+                       tx_err = true;
+               }
+
+               if (tx_err)
+                       xgmac_tx_err(priv);
+       }
+
+       /* TX/RX NORMAL interrupts */
+       if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+               writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+               napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * Runs the normal interrupt handler once with the line masked. */
static void xgmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	xgmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
+
+struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+                      struct rtnl_link_stats64 *storage)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *base = priv->base;
+       u32 count;
+
+       spin_lock_bh(&priv->stats_lock);
+       writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+       storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+       storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+       storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+       storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+       storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+       storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+       storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+       storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+       storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+       count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+       storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+       storage->tx_packets = count;
+       storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+       writel(0, base + XGMAC_MMC_CTRL);
+       spin_unlock_bh(&priv->stats_lock);
+       return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+       xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+       return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+       u32 ctrl;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+       u32 changed = dev->features ^ features;
+
+       if (!(changed & NETIF_F_RXCSUM))
+               return 0;
+
+       ctrl = readl(ioaddr + XGMAC_CONTROL);
+       if (features & NETIF_F_RXCSUM)
+               ctrl |= XGMAC_CONTROL_IPC;
+       else
+               ctrl &= ~XGMAC_CONTROL_IPC;
+       writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+       return 0;
+}
+
/* net_device callbacks wired into the stack for this interface.  Ops not
 * set here fall back to the networking core defaults. */
static const struct net_device_ops xgmac_netdev_ops = {
	.ndo_open = xgmac_open,
	.ndo_start_xmit = xgmac_xmit,
	.ndo_stop = xgmac_stop,
	.ndo_change_mtu = xgmac_change_mtu,
	.ndo_set_rx_mode = xgmac_set_rx_mode,
	.ndo_tx_timeout = xgmac_tx_timeout,
	.ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgmac_poll_controller,
#endif
	.ndo_set_mac_address = xgmac_set_mac_address,
	.ndo_set_features = xgmac_set_features,
};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+                                         struct ethtool_cmd *cmd)
+{
+       cmd->autoneg = 0;
+       cmd->duplex = DUPLEX_FULL;
+       ethtool_cmd_speed_set(cmd, 10000);
+       cmd->supported = 0;
+       cmd->advertising = 0;
+       cmd->transceiver = XCVR_INTERNAL;
+       return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+                                     struct ethtool_pauseparam *pause)
+{
+       struct xgmac_priv *priv = netdev_priv(netdev);
+
+       pause->rx_pause = priv->rx_pause;
+       pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+                                    struct ethtool_pauseparam *pause)
+{
+       struct xgmac_priv *priv = netdev_priv(netdev);
+
+       if (pause->autoneg)
+               return -EINVAL;
+
+       return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
/* One exported statistic: either an offset into struct xgmac_priv
 * (software counter in xstats) or, when is_reg is set, the offset of a
 * hardware MMC counter register. */
struct xgmac_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int stat_offset;	/* struct offset or register offset */
	bool is_reg;		/* true: read via readl(base + offset) */
};

/* Software counter kept in priv->xstats. */
#define XGMAC_STAT(m)	\
	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
/* Hardware MMC counter read straight from the register file. */
#define XGMAC_HW_STAT(m, reg_offset)	\
	{ #m, reg_offset, true }

static const struct xgmac_stats xgmac_gstrings_stats[] = {
	XGMAC_STAT(tx_frame_flushed),
	XGMAC_STAT(tx_payload_error),
	XGMAC_STAT(tx_ip_header_error),
	XGMAC_STAT(tx_local_fault),
	XGMAC_STAT(tx_remote_fault),
	XGMAC_STAT(tx_early),
	XGMAC_STAT(tx_process_stopped),
	XGMAC_STAT(tx_jabber),
	XGMAC_STAT(rx_buf_unav),
	XGMAC_STAT(rx_process_stopped),
	XGMAC_STAT(rx_payload_error),
	XGMAC_STAT(rx_ip_header_error),
	XGMAC_STAT(rx_da_filter_fail),
	XGMAC_STAT(rx_sa_filter_fail),
	XGMAC_STAT(fatal_bus_error),
	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
/* Count reported through get_sset_count and iterated by get_strings. */
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+                                        struct ethtool_stats *dummy,
+                                        u64 *data)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void *p = priv;
+       int i;
+
+       for (i = 0; i < XGMAC_STATS_LEN; i++) {
+               if (xgmac_gstrings_stats[i].is_reg)
+                       *data++ = readl(priv->base +
+                               xgmac_gstrings_stats[i].stat_offset);
+               else
+                       *data++ = *(u32 *)(p +
+                               xgmac_gstrings_stats[i].stat_offset);
+       }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return XGMAC_STATS_LEN;
+       default:
+               return -EINVAL;
+       }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+                                  u8 *data)
+{
+       int i;
+       u8 *p = data;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < XGMAC_STATS_LEN; i++) {
+                       memcpy(p, xgmac_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+                              struct ethtool_wolinfo *wol)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+
+       if (device_can_wakeup(priv->device)) {
+               wol->supported = WAKE_MAGIC | WAKE_UCAST;
+               wol->wolopts = priv->wolopts;
+       }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+                             struct ethtool_wolinfo *wol)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+       if (!device_can_wakeup(priv->device))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~support)
+               return -EINVAL;
+
+       priv->wolopts = wol->wolopts;
+
+       if (wol->wolopts) {
+               device_set_wakeup_enable(priv->device, 1);
+               enable_irq_wake(dev->irq);
+       } else {
+               device_set_wakeup_enable(priv->device, 0);
+               disable_irq_wake(dev->irq);
+       }
+
+       return 0;
+}
+
+static struct ethtool_ops xgmac_ethtool_ops = {
+       .get_settings = xgmac_ethtool_getsettings,
+       .get_link = ethtool_op_get_link,
+       .get_pauseparam = xgmac_get_pauseparam,
+       .set_pauseparam = xgmac_set_pauseparam,
+       .get_ethtool_stats = xgmac_get_ethtool_stats,
+       .get_strings = xgmac_get_strings,
+       .get_wol = xgmac_get_wol,
+       .set_wol = xgmac_set_wol,
+       .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res;
+       struct net_device *ndev = NULL;
+       struct xgmac_priv *priv = NULL;
+       u32 uid;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       if (!request_mem_region(res->start, resource_size(res), pdev->name))
+               return -EBUSY;
+
+       ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       priv = netdev_priv(ndev);
+       platform_set_drvdata(pdev, ndev);
+       ether_setup(ndev);
+       ndev->netdev_ops = &xgmac_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+       spin_lock_init(&priv->stats_lock);
+
+       priv->device = &pdev->dev;
+       priv->dev = ndev;
+       priv->rx_pause = 1;
+       priv->tx_pause = 1;
+
+       priv->base = ioremap(res->start, resource_size(res));
+       if (!priv->base) {
+               netdev_err(ndev, "ioremap failed\n");
+               ret = -ENOMEM;
+               goto err_io;
+       }
+
+       uid = readl(priv->base + XGMAC_VERSION);
+       netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+       ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq == -ENXIO) {
+               netdev_err(ndev, "No irq resource\n");
+               ret = ndev->irq;
+               goto err_irq;
+       }
+
+       ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+                         dev_name(&pdev->dev), ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+                       ndev->irq, ret);
+               goto err_irq;
+       }
+
+       priv->pmt_irq = platform_get_irq(pdev, 1);
+       if (priv->pmt_irq == -ENXIO) {
+               netdev_err(ndev, "No pmt irq resource\n");
+               ret = priv->pmt_irq;
+               goto err_pmt_irq;
+       }
+
+       ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+                         dev_name(&pdev->dev), ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+                       priv->pmt_irq, ret);
+               goto err_pmt_irq;
+       }
+
+       device_set_wakeup_capable(&pdev->dev, 1);
+       if (device_can_wakeup(priv->device))
+               priv->wolopts = WAKE_MAGIC;     /* Magic Frame as default */
+
+       ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+       if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+               ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                    NETIF_F_RXCSUM;
+       ndev->features |= ndev->hw_features;
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+
+       /* Get the MAC address */
+       xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               netdev_warn(ndev, "MAC address %pM not valid",
+                        ndev->dev_addr);
+
+       netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+       ret = register_netdev(ndev);
+       if (ret)
+               goto err_reg;
+
+       return 0;
+
+err_reg:
+       netif_napi_del(&priv->napi);
+       free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+       free_irq(ndev->irq, ndev);
+err_irq:
+       iounmap(priv->base);
+err_io:
+       free_netdev(ndev);
+err_alloc:
+       release_mem_region(res->start, resource_size(res));
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+/**
+ * xgmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX
+ * changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       struct resource *res;
+
+       xgmac_mac_disable(priv->base);
+
+       /* Free the IRQ lines */
+       free_irq(ndev->irq, ndev);
+       free_irq(priv->pmt_irq, ndev);
+
+       platform_set_drvdata(pdev, NULL);
+       unregister_netdev(ndev);
+       netif_napi_del(&priv->napi);
+
+       iounmap(priv->base);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       free_netdev(ndev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+       unsigned int pmt = 0;
+
+       if (mode & WAKE_MAGIC)
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+       if (mode & WAKE_UCAST)
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+       writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+       struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       u32 value;
+
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       netif_device_detach(ndev);
+       napi_disable(&priv->napi);
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+       if (device_may_wakeup(priv->device)) {
+               /* Stop TX/RX DMA Only */
+               value = readl(priv->base + XGMAC_DMA_CONTROL);
+               value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+               writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+               xgmac_pmt(priv->base, priv->wolopts);
+       } else
+               xgmac_mac_disable(priv->base);
+
+       return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+       struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       void __iomem *ioaddr = priv->base;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       xgmac_pmt(ioaddr, 0);
+
+       /* Enable the MAC and DMA */
+       xgmac_mac_enable(ioaddr);
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+       netif_device_attach(ndev);
+       napi_enable(&priv->napi);
+
+       return 0;
+}
+
/* Publish suspend/resume only when sleep support is configured; the
 * platform driver below then sees either &xgmac_pm_ops or NULL. */
static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
#define XGMAC_PM_OPS (&xgmac_pm_ops)
#else
#define XGMAC_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
+
/* OF match table: binds this driver to the Calxeda Highbank XGMAC node. */
static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+       .driver = {
+               .name = "calxedaxgmac",
+               .of_match_table = xgmac_of_match,
+       },
+       .probe = xgmac_probe,
+       .remove = xgmac_remove,
+       .driver.pm = XGMAC_PM_OPS,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
index ca26d97171bddda55a642d330e24f1b43160cc7d..1d17c92f2dda175547ec107a599fa8f42b37ce5d 100644 (file)
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct adapter *adapter = dev->ml_priv;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(adapter->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
 }
 
 static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        struct adapter *adapter = dev->ml_priv;
 
        if (changed & NETIF_F_HW_VLAN_RX)
index f9b6023000404da4d012fe186dc5a704ce7e8db4..47a84359d4e44b65fa6536553215f714a2df47a5 100644 (file)
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
 /*
  * Enable/disable VLAN acceleration.
  */
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
 {
        struct sge *sge = adapter->sge;
 
index e03980bcdd6516adcbbbfeaedcf5ad9db172329d..b9bf16b385f7241c3cffc353e3fbc0feb798eafd 100644 (file)
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
 int t1_poll(struct napi_struct *, int);
 
 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
 void t1_sge_start(struct sge *);
 void t1_sge_stop(struct sge *);
 int t1_sge_intr_error_handler(struct sge *);
index 4d15c8f99c3b8911d4f9ec71c61b4be40988379c..857cc254cab8795362dc07373631aae04d404b6b 100644 (file)
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(adapter->pdev));
-       if (!fw_vers)
-               strcpy(info->fw_version, "N/A");
-       else {
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
+       if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
-       }
 }
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
        }
 }
 
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
        t3_synchronize_rx(adapter, pi);
 }
 
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                cxgb_vlan_mode(dev, features);
index 90ff1318cc05fbc5ed0e08141962d8a87140c74f..65e4b280619a7bc3cfe1891aecb9b64f5ee6bceb 100644 (file)
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
-               cxgb_neigh_update(dst_get_neighbour(nr->new));
+               cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
                break;
        }
        default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
 
 static void cxgb_neigh_update(struct neighbour *neigh)
 {
-       struct net_device *dev = neigh->dev;
+       struct net_device *dev;
 
+       if (!neigh)
+               return;
+       dev = neigh->dev;
        if (dev && (is_offloading(dev))) {
                struct t3cdev *tdev = dev2t3cdev(dev);
 
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
 static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 {
        struct net_device *olddev, *newdev;
+       struct neighbour *n;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        struct l2t_entry *e;
        struct t3c_tid_entry *te;
 
-       olddev = dst_get_neighbour(old)->dev;
-       newdev = dst_get_neighbour(new)->dev;
+       n = dst_get_neighbour_noref(old);
+       if (!n)
+               return;
+       olddev = n->dev;
+
+       n = dst_get_neighbour_noref(new);
+       if (!n)
+               return;
+       newdev = n->dev;
+
        if (!is_offloading(olddev))
                return;
        if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        }
 
        /* Add new L2T entry */
-       e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+       e = t3_l2t_get(tdev, new, newdev);
        if (!e) {
                printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
                       __func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
 
 out_free_l2t:
        t3_free_l2t(L2DATA(dev));
-       rcu_assign_pointer(dev->l2opt, NULL);
+       RCU_INIT_POINTER(dev->l2opt, NULL);
 out_free:
        kfree(t);
        return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
        rcu_read_lock();
        d = L2DATA(tdev);
        rcu_read_unlock();
-       rcu_assign_pointer(tdev->l2opt, NULL);
+       RCU_INIT_POINTER(tdev->l2opt, NULL);
        call_rcu(&d->rcu_head, clean_l2_data);
        if (t->nofail_skb)
                kfree_skb(t->nofail_skb);
index 70fec8b1140f9239ec1975e0c768381c0fd54b92..3fa3c8833ed79e7257c8ff8ef4769828e78f7025 100644 (file)
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
        spin_unlock(&e->lock);
 }
 
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev)
 {
        struct l2t_entry *e = NULL;
+       struct neighbour *neigh;
+       struct port_info *p;
        struct l2t_data *d;
        int hash;
-       u32 addr = *(u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       struct port_info *p = netdev_priv(dev);
-       int smt_idx = p->port_id;
+       u32 addr;
+       int ifidx;
+       int smt_idx;
 
        rcu_read_lock();
+       neigh = dst_get_neighbour_noref(dst);
+       if (!neigh)
+               goto done_rcu;
+
+       addr = *(u32 *) neigh->primary_key;
+       ifidx = neigh->dev->ifindex;
+
+       if (!dev)
+               dev = neigh->dev;
+       p = netdev_priv(dev);
+       smt_idx = p->port_id;
+
        d = L2DATA(cdev);
        if (!d)
                goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        l2t_hold(d, e);
                        if (atomic_read(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
-                       goto done;
+                       goto done_unlock;
                }
 
        /* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        e->vlan = VLAN_NONE;
                spin_unlock(&e->lock);
        }
-done:
+done_unlock:
        write_unlock_bh(&d->lock);
 done_rcu:
        rcu_read_unlock();
index c5f54796e2cbed868d8bc65a60bd127f23c995bc..c4e864369751ae212fe5675aae91a76ad41847a1 100644 (file)
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
 
 void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev);
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
                     struct l2t_entry *e);
index 4c8f42afa3c6d2b9b29e2925ab52e80ffe742538..7b6b43d576d125273e096213a15d1a051ebe3dc7 100644 (file)
@@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
 
-static int vf_acls;
+static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
 module_param(vf_acls, bool, 0644);
@@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct adapter *adapter = netdev2adap(dev);
 
-       strcpy(info->driver, KBUILD_MODNAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(adapter->pdev));
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
 
-       if (!adapter->params.fw_vers)
-               strcpy(info->fw_version, "N/A");
-       else
+       if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        return err;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
        const struct port_info *pi = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        int err;
 
        if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features)
        return err;
 }
 
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
+{
+       const struct port_info *pi = netdev_priv(dev);
+
+       return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
 {
        const struct port_info *pi = netdev_priv(dev);
-       unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+       unsigned int n = pi->rss_size;
 
-       p->size = pi->rss_size;
        while (n--)
-               p->ring_index[n] = pi->rss[n];
+               p[n] = pi->rss[n];
        return 0;
 }
 
-static int set_rss_table(struct net_device *dev,
-                        const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
 {
        unsigned int i;
        struct port_info *pi = netdev_priv(dev);
 
-       if (p->size != pi->rss_size)
-               return -EINVAL;
-       for (i = 0; i < p->size; i++)
-               if (p->ring_index[i] >= pi->nqsets)
-                       return -EINVAL;
-       for (i = 0; i < p->size; i++)
-               pi->rss[i] = p->ring_index[i];
+       for (i = 0; i < pi->rss_size; i++)
+               pi->rss[i] = p[i];
        if (pi->adapter->flags & FULL_INIT_DONE)
                return write_rss(pi, pi->rss);
        return 0;
@@ -1990,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
        .get_wol           = get_wol,
        .set_wol           = set_wol,
        .get_rxnfc         = get_rxnfc,
+       .get_rxfh_indir_size = get_rss_table_size,
        .get_rxfh_indir    = get_rss_table,
        .set_rxfh_indir    = set_rss_table,
        .flash_device      = set_flash,
@@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap)
                if (!pi->rss)
                        return -ENOMEM;
                for (j = 0; j < pi->rss_size; j++)
-                       pi->rss[j] = j % pi->nqsets;
+                       pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
 }
@@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 {
        int func, i, err;
        struct port_info *pi;
-       unsigned int highdma = 0;
+       bool highdma = false;
        struct adapter *adapter = NULL;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev,
        }
 
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               highdma = NETIF_F_HIGHDMA;
+               highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3637,9 @@ static int __devinit init_one(struct pci_dev *pdev,
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_RXHASH |
                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               netdev->features |= netdev->hw_features | highdma;
+               if (highdma)
+                       netdev->hw_features |= NETIF_F_HIGHDMA;
+               netdev->features |= netdev->hw_features;
                netdev->vlan_features = netdev->features & VLAN_FEAT;
 
                netdev->priv_flags |= IFF_UNICAST_FLT;
index 140254c7cba900aea3c80336f7033db651c957ba..2dae7959f00082c46c9f00be8b26c80675ae4c22 100644 (file)
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
        __be64 *d = &q->desc[q->pidx];
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-       gfp |= __GFP_NOWARN;         /* failures are expected */
+       gfp |= __GFP_NOWARN | __GFP_COLD;
 
 #if FL_PG_ORDER > 0
        /*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 #endif
 
        while (n--) {
-               pg = __netdev_alloc_page(adap->port[0], gfp);
+               pg = alloc_page(gfp);
                if (unlikely(!pg)) {
                        q->alloc_failed++;
                        break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-                       netdev_free_page(adap->port[0], pg);
+                       put_page(pg);
                        goto out;
                }
                *d++ = cpu_to_be64(mapping);
index da9072bfca8b859a508943d1abdc9fd12201865c..8155cfecae19d9e97d944c09fa9d92617bbf61bc 100644 (file)
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
        return ret;
 }
 
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct port_info *pi = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
 {
        struct adapter *adapter = netdev2adap(dev);
 
-       strcpy(drvinfo->driver, KBUILD_MODNAME);
-       strcpy(drvinfo->version, DRV_VERSION);
-       strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+       strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+               sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%u.%u.%u.%u, TP %u.%u.%u.%u",
                 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
index 8d5d55ad102d57a0ee2d5ff3f68f5c1861099019..c381db23e71365cedf0ffe7d6c3e518fcb45190e 100644 (file)
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 alloc_small_pages:
        while (n--) {
-               page = __netdev_alloc_page(adapter->port[0],
-                                          gfp | __GFP_NOWARN);
+               page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
                if (unlikely(!page)) {
                        fl->alloc_failed++;
                        break;
@@ -664,7 +663,7 @@ alloc_small_pages:
                dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-                       netdev_free_page(adapter->port[0], page);
+                       put_page(page);
                        break;
                }
                *d++ = cpu_to_be64(dma_addr);
index fd6247b3c0ee4dd4d105b7d1a827f2bd3e40d906..bf0fc56dba19b70d070edb00e708adea00238878 100644 (file)
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_add_vlan(enic, vid);
+       err = enic_add_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_del_vlan(enic, vid);
+       err = enic_del_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 int enic_dev_enable2(struct enic *enic, int active)
index 1f83a4747ba01fe15fdf8a6d9f5b1c7634d90026..da1cba3c410ec0d1bb9e8acfe99d7876712a577a 100644 (file)
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
        int broadcast, int promisc, int allmulti);
 int enic_dev_add_addr(struct enic *enic, u8 *addr);
 int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 int enic_dev_notify_unset(struct enic *enic);
 int enic_dev_hang_notify(struct enic *enic);
 int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
index c3786fda11dbc6a399c4f8cb32fc1646ae4b9d91..2fd9db4b1be57b440b1d07ab5d8e582c49625d05 100644 (file)
@@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
 
        enic_dev_fw_info(enic, &fw_info);
 
-       strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-       strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-       strncpy(drvinfo->fw_version, fw_info->fw_version,
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, fw_info->fw_version,
                sizeof(drvinfo->fw_version));
-       strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+       strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
                sizeof(drvinfo->bus_info));
 }
 
@@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 
 #endif
        /* Allocate structure for port profiles */
-       enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+       enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
        if (!enic->pp) {
                pr_err("port profile alloc failed, aborting\n");
                err = -ENOMEM;
index 438f4580bf66207539761761e37c42e0ef303b78..f801754c71a7eac0754434e9077ab34092709b36 100644 (file)
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
        return mii_nway_restart(&dm->mii);
 }
 
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        board_info_t *dm = to_dm9000_board(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        unsigned long flags;
 
        if (!(changed & NETIF_F_RXCSUM))
@@ -613,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 
                if (!dm->wake_state)
                        irq_set_irq_wake(dm->irq_wake, 1);
-               else if (dm->wake_state & !opts)
+               else if (dm->wake_state && !opts)
                        irq_set_irq_wake(dm->irq_wake, 0);
        }
 
index 1427739d9a514c9881e3b5b8187df303bc6d66c2..1eb46a0bb488f86ec4df92b24ec1673b6e2c3599 100644 (file)
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
 {
        struct de_private *de = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(de->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
        info->eedump_len = DE_EEPROM_SIZE;
 }
 
index 871bcaa7068d0f5a5eb476eb8facc5fd5cf9cd22..4d71f5ae20c8f78fd6ee5571f343d0fc1ea54370 100644 (file)
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
     u_long iobase = 0;                     /* Clear upper 32 bits in Alphas */
     int i, j;
     struct de4x5_private *lp = netdev_priv(dev);
-    struct list_head *walk;
-
-    list_for_each(walk, &pdev->bus_list) {
-       struct pci_dev *this_dev = pci_dev_b(walk);
-
-       /* Skip the pci_bus list entry */
-       if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+    struct pci_dev *this_dev;
 
+    list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
        vendor = this_dev->vendor;
        device = this_dev->device << 8;
        if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
     struct de4x5_private *lp = netdev_priv(dev);
     char *p, *q, t;
 
-    lp->params.fdx = 0;
+    lp->params.fdx = false;
     lp->params.autosense = AUTO;
 
     if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
        t = *q;
        *q = '\0';
 
-       if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+       if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
 
        if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
            if (strstr(p, "TP")) {
index 17b11ee1745a3795fb2da04a928ebbdfb19357c5..51f7542eb451a048f851ae1979565008d1840b47 100644 (file)
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
 {
        struct dmfe_board_info *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (np->pdev)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
        else
                sprintf(info->bus_info, "EISA 0x%lx %d",
                        dev->base_addr, dev->irq);
index 9656dd0647d983d1d0dc056b87d1ecf77fe5c415..4eb0d76145c2347cf5060223f6003a94bfbb7857 100644 (file)
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct tulip_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 
index 7a44a7a6adc8a0c172a4aa11dfd623ab74ccdc15..48b0b6566eef2105399e3bc06ddae226e94cde0d 100644 (file)
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
 {
        struct uli526x_board_info *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (np->pdev)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
        else
                sprintf(info->bus_info, "EISA 0x%lx %d",
                        dev->base_addr, dev->irq);
index 4d01219ba22ffaf11890d0686e49217db8b63749..52da7b2fe3b6123c32749bd29a4a13c7e94be128 100644 (file)
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
 {
        struct netdev_private *np = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index 23a65398d0115d5c879da96fe59c56bfa7339046..c24fab1e9cbe5f32fef5372f91b781db18e0553a 100644 (file)
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $,  Bjorn Ekwall (bj
 
 #include "de600.h"
 
-static unsigned int check_lost = 1;
+static bool check_lost = true;
 module_param(check_lost, bool, 0);
 MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
 
index dcd7f7a71ad4aaddbd426b8098b83d2b0aa8d14e..28a3a9b50b8b2856ecac014cc6797b53f7930a7f 100644 (file)
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
index c1063d1540c2acd993e7d695803c64917cafc18d..ce88c0f399f68903333bbabdf546ea55abfed888 100644 (file)
@@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static void dnet_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, "0");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, "0", sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +977,7 @@ static struct platform_driver dnet_driver = {
        },
 };
 
-static int __init dnet_init(void)
-{
-       return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
-       platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Dave DNET Ethernet driver");
index 644e8fed836497483d8d2e5b59a61c03c8c0dcf7..cbdec2536da6e92f99d1f2839a87541f3a4a43d5 100644 (file)
@@ -40,6 +40,7 @@
 #define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
+#define OC_NAME_SH             OC_NAME "(Skyhawk)"
 #define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
@@ -50,6 +51,7 @@
 #define OC_DEVICE_ID2          0x710   /* Device Id for BE3 cards */
 #define OC_DEVICE_ID3          0xe220  /* Device id for Lancer cards */
 #define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5          0x720   /* Device Id for Skyhawk cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
+       case OC_DEVICE_ID5:
+               return OC_NAME_SH;
        default:
                return BE_NAME;
        }
@@ -288,14 +292,14 @@ struct be_drv_stats {
 };
 
 struct be_vf_cfg {
-       unsigned char vf_mac_addr[ETH_ALEN];
-       u32 vf_if_handle;
-       u32 vf_pmac_id;
-       u16 vf_vlan_tag;
-       u32 vf_tx_rate;
+       unsigned char mac_addr[ETH_ALEN];
+       int if_handle;
+       int pmac_id;
+       u16 vlan_tag;
+       u32 tx_rate;
 };
 
-#define BE_INVALID_PMAC_ID             0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT              1
 
 struct be_adapter {
        struct pci_dev *pdev;
@@ -345,13 +349,16 @@ struct be_adapter {
        struct delayed_work work;
        u16 work_counter;
 
+       u32 flags;
        /* Ethtool knobs and info */
        char fw_ver[FW_VER_LEN];
-       u32 if_handle;          /* Used to configure filtering */
+       int if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */
        u32 beacon_state;       /* for set_phys_id */
 
        bool eeh_err;
+       bool ue_detected;
+       bool fw_timeout;
        u32 port_num;
        bool promiscuous;
        bool wol;
@@ -359,7 +366,6 @@ struct be_adapter {
        u32 function_caps;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
-       bool ue_detected;
        bool stats_cmd_sent;
        int link_speed;
        u8 port_type;
@@ -369,16 +375,20 @@ struct be_adapter {
        u32 flash_status;
        struct completion flash_compl;
 
-       bool be3_native;
-       bool sriov_enabled;
-       struct be_vf_cfg *vf_cfg;
+       u32 num_vfs;
        u8 is_virtfn;
+       struct be_vf_cfg *vf_cfg;
+       bool be3_native;
        u32 sli_family;
        u8 hba_port_num;
        u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
+#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i)                                        \
+       for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+               i++, vf_cfg++)
 
 /* BladeEngine Generation numbers */
 #define BE_GEN2 2
@@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
        return adapter->num_rx_qs > 1;
 }
 
+static inline bool be_error(struct be_adapter *adapter)
+{
+       return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
 extern void be_parse_stats(struct be_adapter *adapter);
 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
 #endif                         /* BE_H */
index 2c7b36673dfc27bbae1c94a033ff702a2a928be4..0fcb45624796c29942a30125439d6eeb5ab1d407 100644 (file)
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
-       if (adapter->eeh_err) {
-               dev_info(&adapter->pdev->dev,
-                       "Error in Card Detected! Cannot issue commands\n");
+       if (be_error(adapter))
                return;
-       }
 
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@ done:
 static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
 {
-       be_link_status_update(adapter, evt->port_link_status);
+       /* When link status changes, link speed must be re-queried from FW */
+       adapter->link_speed = -1;
+
+       /* For the initial link status do not rely on the ASYNC event as
+        * it may not be received in some cases.
+        */
+       if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+               be_link_status_update(adapter, evt->port_link_status);
 }
 
 /* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        for (i = 0; i < mcc_timeout; i++) {
+               if (be_error(adapter))
+                       return -EIO;
+
                num = be_process_mcc(adapter, &status);
                if (num)
                        be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
                udelay(100);
        }
        if (i == mcc_timeout) {
-               dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+               dev_err(&adapter->pdev->dev, "FW not responding\n");
+               adapter->fw_timeout = true;
                return -1;
        }
        return status;
@@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
        int msecs = 0;
        u32 ready;
 
-       if (adapter->eeh_err) {
-               dev_err(&adapter->pdev->dev,
-                       "Error detected in card.Cannot issue commands\n");
-               return -EIO;
-       }
-
        do {
+               if (be_error(adapter))
+                       return -EIO;
+
                ready = ioread32(db);
-               if (ready == 0xffffffff) {
-                       dev_err(&adapter->pdev->dev,
-                               "pci slot disconnected\n");
+               if (ready == 0xffffffff)
                        return -1;
-               }
 
                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;
 
                if (msecs > 4000) {
-                       dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+                       dev_err(&adapter->pdev->dev, "FW not responding\n");
+                       adapter->fw_timeout = true;
                        be_detect_dump_ue(adapter);
                        return -1;
                }
@@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
        u8 *wrb;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
 
@@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 
 /* Use MCC */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle)
+                       u8 type, bool permanent, u32 if_handle, u32 pmac_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
+               req->pmac_id = cpu_to_le32(pmac_id);
                req->permanent = 0;
        }
 
@@ -695,12 +693,15 @@ err:
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;
 
+       if (pmac_id == -1)
+               return 0;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
        void *ctxt;
        int status;
 
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
 
-       wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;
 
@@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-       status = be_mbox_notify_wait(adapter);
+       status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
 
-       mutex_unlock(&adapter->mbox_lock);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
 
        return status;
 }
@@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        u8 subsys = 0, opcode = 0;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
 
@@ -1136,16 +1139,13 @@ err:
 }
 
 /* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
-       if (!interface_id)
+       if (interface_id == -1)
                return 0;
 
        spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@ err:
 
 /* Uses synchronous mcc */
 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-                       u16 *link_speed, u32 dom)
+                            u16 *link_speed, u8 *link_status, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
 
        spin_lock_bh(&adapter->mcc_lock);
 
+       if (link_status)
+               *link_status = LINK_DOWN;
+
        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
@@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
        }
        req = embedded_payload(wrb);
 
+       if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+               req->hdr.version = 1;
+
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
 
@@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
-                       *link_speed = le16_to_cpu(resp->link_speed);
+                       if (link_speed)
+                               *link_speed = le16_to_cpu(resp->link_speed);
                        if (mac_speed)
                                *mac_speed = resp->mac_speed;
                }
+               if (link_status)
+                       *link_status = resp->logical_link_status;
        }
 
 err:
@@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
-       u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
-                       0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+       u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+                       0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+                       0x3ea83c02, 0x4a110304};
        int status;
 
        if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@ err_unlock:
        return status;
 }
 
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+               u32 data_size, u32 data_offset, const char *obj_name,
+               u32 *data_read, u32 *eof, u8 *addn_status)
+{
+       struct be_mcc_wrb *wrb;
+       struct lancer_cmd_req_read_object *req;
+       struct lancer_cmd_resp_read_object *resp;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err_unlock;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                       OPCODE_COMMON_READ_OBJECT,
+                       sizeof(struct lancer_cmd_req_read_object), wrb,
+                       NULL);
+
+       req->desired_read_len = cpu_to_le32(data_size);
+       req->read_offset = cpu_to_le32(data_offset);
+       strcpy(req->object_name, obj_name);
+       req->descriptor_count = cpu_to_le32(1);
+       req->buf_len = cpu_to_le32(data_size);
+       req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+       req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+       status = be_mcc_notify_wait(adapter);
+
+       resp = embedded_payload(wrb);
+       if (!status) {
+               *data_read = le32_to_cpu(resp->actual_read_len);
+               *eof = le32_to_cpu(resp->eof);
+       } else {
+               *addn_status = resp->additional_status;
+       }
+
+err_unlock:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
@@ -2238,3 +2295,99 @@ err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
 }
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+                                                       u32 *pmac_id)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_get_mac_list *req;
+       int status;
+       int mac_count;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                               OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+                               wrb, NULL);
+
+       req->hdr.domain = domain;
+
+       status = be_mcc_notify_wait(adapter);
+       if (!status) {
+               struct be_cmd_resp_get_mac_list *resp =
+                                               embedded_payload(wrb);
+               int i;
+               u8 *ctxt = &resp->context[0][0];
+               status = -EIO;
+               mac_count = resp->mac_count;
+               be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+               for (i = 0; i < mac_count; i++) {
+                       if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+                                          act, ctxt)) {
+                               *pmac_id = AMAP_GET_BITS
+                                       (struct amap_get_mac_list_context,
+                                        macid, ctxt);
+                               status = 0;
+                               break;
+                       }
+                       ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+               }
+       }
+
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+                       u8 mac_count, u32 domain)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_mac_list *req;
+       int status;
+       struct be_dma_mem cmd;
+
+       memset(&cmd, 0, sizeof(struct be_dma_mem));
+       cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+                       &cmd.dma, GFP_KERNEL);
+       if (!cmd.va) {
+               dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = cmd.va;
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                               OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+                               wrb, &cmd);
+
+       req->hdr.domain = domain;
+       req->mac_count = mac_count;
+       if (mac_count)
+               memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+       status = be_mcc_notify_wait(adapter);
+
+err:
+       dma_free_coherent(&adapter->pdev->dev, cmd.size,
+                               cmd.va, cmd.dma);
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
index a35cd03fac4e840ab5f4aa7a15cae8ac4c01e652..dca89249088f6b7cd4439a1a2048aec059047ba5 100644 (file)
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP          103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES   121
+#define OPCODE_COMMON_GET_MAC_LIST                     147
+#define OPCODE_COMMON_SET_MAC_LIST                     148
+#define OPCODE_COMMON_READ_OBJECT                      171
 #define OPCODE_COMMON_WRITE_OBJECT                     172
 
 #define OPCODE_ETH_RSS_CONFIG                          1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
        u8 type;
        u8 permanent;
        u16 if_id;
+       u32 pmac_id;
 } __packed;
 
 struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@ struct be_cmd_resp_link_status {
        u8 mgmt_mac_duplex;
        u8 mgmt_mac_speed;
        u16 link_speed;
-       u32 rsvd0;
+       u8 logical_link_status;
+       u8 rsvd1[3];
 } __packed;
 
 /******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object {
        u32 actual_write_len;
 };
 
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK                 (32*1024)
+#define LANCER_READ_FILE_EOF_MASK              0x80000000
+
+#define LANCER_FW_DUMP_FILE                    "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE                     "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE                     "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+       struct be_cmd_req_hdr hdr;
+       u32 desired_read_len;
+       u32 read_offset;
+       u8 object_name[104];
+       u32 descriptor_count;
+       u32 buf_len;
+       u32 addr_low;
+       u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+       u8 opcode;
+       u8 subsystem;
+       u8 rsvd1[2];
+       u8 status;
+       u8 additional_status;
+       u8 rsvd2[2];
+       u32 resp_len;
+       u32 actual_resp_len;
+       u32 actual_read_len;
+       u32 eof;
+};
+
 /************************ WOL *******************************/
 struct be_cmd_req_acpi_wol_magic_config{
        struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap {
        u8 rsvd[212];
 };
 
+/******************** GET/SET_MACLIST  **************************/
+#define BE_MAX_MAC                     64
+struct amap_get_mac_list_context {
+       u8 macid[31];
+       u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+       struct be_cmd_req_hdr hdr;
+       u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+       struct be_cmd_resp_hdr hdr;
+       u8 mac_count;
+       u8 rsvd1;
+       u16 rsvd2;
+       u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+       struct be_cmd_req_hdr hdr;
+       u8 mac_count;
+       u8 rsvd1;
+       u16 rsvd2;
+       struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
 /*************** HW Stats Get v1 **********************************/
 #define BE_TXP_SW_SZ                   48
 struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle);
+                       u8 type, bool permanent, u32 if_handle, u32 pmac_id);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id, u32 domain);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-                       u32 pmac_id, u32 domain);
+                       int pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
                        u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
                        u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
                        u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                        int type);
 extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
                        struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
-                       u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+                                   u16 *link_speed, u8 *link_status, u32 dom);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
                        struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
                                u32 data_size, u32 data_offset,
                                const char *obj_name,
                                u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+               u32 data_size, u32 data_offset, const char *obj_name,
+               u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                                int offset);
 extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 extern int be_cmd_req_native_mode(struct be_adapter *adapter);
 extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+                                                       u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+                                               u8 mac_count, u32 domain);
 
index bf8153ea4ed81a6a443681c551e7f45fd9aa7bc8..6db6b6ae5e9b1f66e733f7ff530784fa2237fd11 100644 (file)
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
        memset(fw_on_flash, 0 , sizeof(fw_on_flash));
        be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
 
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->version, DRV_VER);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
        if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
                strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
                strcat(drvinfo->fw_version, "]");
        }
 
-       strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
 }
 
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+       u32 data_read = 0, eof;
+       u8 addn_status;
+       struct be_dma_mem data_len_cmd;
+       int status;
+
+       memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+       /* data_offset and data_size should be 0 to get reg len */
+       status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+                               file_name, &data_read, &eof, &addn_status);
+
+       return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+               u32 buf_len, void *buf)
+{
+       struct be_dma_mem read_cmd;
+       u32 read_len = 0, total_read_len = 0, chunk_size;
+       u32 eof = 0;
+       u8 addn_status;
+       int status = 0;
+
+       read_cmd.size = LANCER_READ_FILE_CHUNK;
+       read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+                       &read_cmd.dma);
+
+       if (!read_cmd.va) {
+               dev_err(&adapter->pdev->dev,
+                               "Memory allocation failure while reading dump\n");
+               return -ENOMEM;
+       }
+
+       while ((total_read_len < buf_len) && !eof) {
+               chunk_size = min_t(u32, (buf_len - total_read_len),
+                               LANCER_READ_FILE_CHUNK);
+               chunk_size = ALIGN(chunk_size, 4);
+               status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+                               total_read_len, file_name, &read_len,
+                               &eof, &addn_status);
+               if (!status) {
+                       memcpy(buf + total_read_len, read_cmd.va, read_len);
+                       total_read_len += read_len;
+                       eof &= LANCER_READ_FILE_EOF_MASK;
+               } else {
+                       status = -EIO;
+                       break;
+               }
+       }
+       pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+                       read_cmd.dma);
+
+       return status;
+}
+
 static int
 be_get_reg_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        u32 log_size = 0;
 
-       if (be_physfn(adapter))
-               be_cmd_get_reg_len(adapter, &log_size);
-
+       if (be_physfn(adapter)) {
+               if (lancer_chip(adapter))
+                       log_size = lancer_cmd_get_file_len(adapter,
+                                       LANCER_FW_DUMP_FILE);
+               else
+                       be_cmd_get_reg_len(adapter, &log_size);
+       }
        return log_size;
 }
 
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
 
        if (be_physfn(adapter)) {
                memset(buf, 0, regs->len);
-               be_cmd_get_regs(adapter, regs->len, buf);
+               if (lancer_chip(adapter))
+                       lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+                                       regs->len, buf);
+               else
+                       be_cmd_get_regs(adapter, regs->len, buf);
        }
 }
 
@@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct be_phy_info phy_info;
        u8 mac_speed = 0;
        u16 link_speed = 0;
+       u8 link_status;
        int status;
 
        if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
                status = be_cmd_link_status_query(adapter, &mac_speed,
-                                               &link_speed, 0);
+                                                 &link_speed, &link_status, 0);
+               if (!status)
+                       be_link_status_update(adapter, link_status);
 
                /* link_speed is in units of 10 Mbps */
                if (link_speed) {
@@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+                            struct ethtool_ringparam *ring)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->rx_obj[0].q.len;
-       ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
-       ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
-       ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+       ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+       ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
 }
 
 static void
@@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        }
 
        if (be_cmd_link_status_query(adapter, &mac_speed,
-                               &qos_link_speed, 0) != 0) {
+                                    &qos_link_speed, NULL, 0) != 0) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
        } else if (!mac_speed) {
@@ -660,7 +727,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 static int
 be_get_eeprom_len(struct net_device *netdev)
 {
-       return BE_READ_SEEPROM_LEN;
+       struct be_adapter *adapter = netdev_priv(netdev);
+       if (lancer_chip(adapter)) {
+               if (be_physfn(adapter))
+                       return lancer_cmd_get_file_len(adapter,
+                                       LANCER_VPD_PF_FILE);
+               else
+                       return lancer_cmd_get_file_len(adapter,
+                                       LANCER_VPD_VF_FILE);
+       } else {
+               return BE_READ_SEEPROM_LEN;
+       }
 }
 
 static int
@@ -675,6 +752,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
        if (!eeprom->len)
                return -EINVAL;
 
+       if (lancer_chip(adapter)) {
+               if (be_physfn(adapter))
+                       return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+                                       eeprom->len, data);
+               else
+                       return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+                                       eeprom->len, data);
+       }
+
        eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
index bf266a00c7742129fd8d86699e1756d0dc1ba070..6c46753aeb43f7b6fb1172ba02c89bc6f317e160 100644 (file)
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("ServerEngines Corporation");
 MODULE_LICENSE("GPL");
 
-static ushort rx_frag_size = 2048;
 static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
 module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
 
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+       { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                return -EADDRNOTAVAIL;
 
        status = be_cmd_mac_addr_query(adapter, current_mac,
-                       MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+                               MAC_ADDRESS_TYPE_NETWORK, false,
+                               adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
        struct be_drv_stats *drvs = &adapter->drv_stats;
 
        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+       drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+       drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
        return stats;
 }
 
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 {
        struct net_device *netdev = adapter->netdev;
 
-       /* when link status changes, link speed must be re-queried from card */
-       adapter->link_speed = -1;
-       if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
-               netif_carrier_on(netdev);
-               dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
-       } else {
+       if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
-               dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+               adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }
+
+       if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+               netif_carrier_on(netdev);
+       else
+               netif_carrier_off(netdev);
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
 }
 
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+                                       struct sk_buff *skb)
+{
+       u8 vlan_prio;
+       u16 vlan_tag;
+
+       vlan_tag = vlan_tx_tag_get(skb);
+       vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+       /* If vlan priority provided by OS is NOT in available bmap */
+       if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+               vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+                               adapter->recommended_prio;
+
+       return vlan_tag;
+}
+
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
 {
-       u8 vlan_prio = 0;
-       u16 vlan_tag = 0;
+       u16 vlan_tag;
 
        memset(hdr, 0, sizeof(*hdr));
 
@@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 
        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
-               vlan_tag = vlan_tx_tag_get(skb);
-               vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-               /* If vlan priority provided by OS is NOT in available bmap */
-               if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
-                       vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
-                                       adapter->recommended_prio;
+               vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }
 
@@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;
 
+       /* For vlan tagged pkts, BE
+        * 1) calculates checksum even when CSO is not requested
+        * 2) calculates checksum wrongly for padded pkt less than
+        * 60 bytes long.
+        * As a workaround disable TX vlan offloading in such cases.
+        */
+       if (unlikely(vlan_tx_tag_present(skb) &&
+                    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+               skb = skb_share_check(skb, GFP_ATOMIC);
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb->vlan_tci = 0;
+       }
+
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
+tx_drop:
        return NETDEV_TX_OK;
 }
 
@@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
  */
 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
-       u32 if_handle;
 
        if (vf) {
-               if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
-               vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
-               status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+               vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+               status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+                                           1, 1, 0);
        }
 
        /* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
        return status;
 }
 
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
 
-       adapter->vlans_added++;
-       if (!be_physfn(adapter))
-               return;
+       if (!be_physfn(adapter)) {
+               status = -EINVAL;
+               goto ret;
+       }
 
        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
-               be_vid_config(adapter, false, 0);
+               status = be_vid_config(adapter, false, 0);
+
+       if (!status)
+               adapter->vlans_added++;
+       else
+               adapter->vlan_tag[vid] = 0;
+ret:
+       return status;
 }
 
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
 
-       adapter->vlans_added--;
-
-       if (!be_physfn(adapter))
-               return;
+       if (!be_physfn(adapter)) {
+               status = -EINVAL;
+               goto ret;
+       }
 
        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
-               be_vid_config(adapter, false, 0);
+               status = be_vid_config(adapter, false, 0);
+
+       if (!status)
+               adapter->vlans_added--;
+       else
+               adapter->vlan_tag[vid] = 1;
+ret:
+       return status;
 }
 
 static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@ done:
 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+       if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;
 
-       if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-               status = be_cmd_pmac_del(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+       if (lancer_chip(adapter)) {
+               status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
+       } else {
+               status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                        vf_cfg->pmac_id, vf + 1);
 
-       status = be_cmd_pmac_add(adapter, mac,
-                               adapter->vf_cfg[vf].vf_if_handle,
-                               &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+               status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+                                        &vf_cfg->pmac_id, vf + 1);
+       }
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
-               memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+               memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
        return status;
 }
@@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (vf >= num_vfs)
+       if (vf >= adapter->num_vfs)
                return -EINVAL;
 
        vi->vf = vf;
-       vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
-       vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+       vi->tx_rate = vf_cfg->tx_rate;
+       vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
-       memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+       memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
        return 0;
 }
@@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (vlan > 4095))
+       if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;
 
        if (vlan) {
-               adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+               adapter->vf_cfg[vf].vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
-               adapter->vf_cfg[vf].vf_vlan_tag = 0;
+               adapter->vf_cfg[vf].vlan_tag = 0;
                adapter->vlans_added--;
        }
 
@@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (rate < 0))
+       if (vf >= adapter->num_vfs)
                return -EINVAL;
 
-       if (rate > 10000)
-               rate = 10000;
+       if (rate < 100 || rate > 10000) {
+               dev_err(&adapter->pdev->dev,
+                       "tx rate must be between 100 and 10000 Mbps\n");
+               return -EINVAL;
+       }
 
-       adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
        if (status)
-               dev_info(&adapter->pdev->dev,
+               dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
+       else
+               adapter->vf_cfg[vf].tx_rate = rate;
        return status;
 }
 
@@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-       if ((num_vfs && adapter->sriov_enabled) ||
-               be_is_mc(adapter) ||
+       if (sriov_enabled(adapter) || be_is_mc(adapter) ||
                lancer_chip(adapter) || !be_physfn(adapter) ||
                adapter->generation == BE_GEN2)
                return 1;
@@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
        u8 i;
 
        adapter->num_tx_qs = be_num_txqs_want(adapter);
-       if (adapter->num_tx_qs != MAX_TX_QS)
+       if (adapter->num_tx_qs != MAX_TX_QS) {
+               rtnl_lock();
                netif_set_real_num_tx_queues(adapter->netdev,
                        adapter->num_tx_qs);
+               rtnl_unlock();
+       }
 
        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
                if (be_queue_alloc(adapter, q, TX_Q_LEN,
                        sizeof(struct be_eth_wrb)))
                        goto err;
-
-               if (be_cmd_txq_create(adapter, q, cq))
-                       goto err;
        }
        return 0;
 
@@ -1728,8 +1786,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-               !adapter->sriov_enabled && be_physfn(adapter) &&
-               !be_is_mc(adapter)) {
+            !sriov_enabled(adapter) && be_physfn(adapter) &&
+            !be_is_mc(adapter)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1987,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
+       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        struct be_tx_obj *txo;
        struct be_eth_tx_compl *txcp;
        int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2024,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        mcc_compl = be_process_mcc(adapter, &status);
 
        if (mcc_compl) {
-               struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }
 
        napi_complete(napi);
 
+       /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+       if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+               for_all_tx_queues(adapter, txo, i)
+                       be_cq_notify(adapter, txo->cq.id, true, 0);
+
+               be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+       }
+
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
        adapter->drv_stats.tx_events++;
        return 1;
@@ -1982,6 +2048,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
 
+       if (adapter->eeh_err || adapter->ue_detected)
+               return;
+
        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2077,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
                sliport_status & SLIPORT_STATUS_ERR_MASK) {
                adapter->ue_detected = true;
                adapter->eeh_err = true;
-               dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+               dev_err(&adapter->pdev->dev,
+                       "Unrecoverable error in the card\n");
        }
 
        if (ue_lo) {
@@ -2036,53 +2106,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
        }
 }
 
-static void be_worker(struct work_struct *work)
-{
-       struct be_adapter *adapter =
-               container_of(work, struct be_adapter, work.work);
-       struct be_rx_obj *rxo;
-       int i;
-
-       if (!adapter->ue_detected)
-               be_detect_dump_ue(adapter);
-
-       /* when interrupts are not yet enabled, just reap any pending
-       * mcc completions */
-       if (!netif_running(adapter->netdev)) {
-               int mcc_compl, status = 0;
-
-               mcc_compl = be_process_mcc(adapter, &status);
-
-               if (mcc_compl) {
-                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
-               }
-
-               goto reschedule;
-       }
-
-       if (!adapter->stats_cmd_sent) {
-               if (lancer_chip(adapter))
-                       lancer_cmd_get_pport_stats(adapter,
-                                               &adapter->stats_cmd);
-               else
-                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
-       }
-
-       for_all_rx_queues(adapter, rxo, i) {
-               be_rx_eqd_update(adapter, rxo);
-
-               if (rxo->rx_post_starved) {
-                       rxo->rx_post_starved = false;
-                       be_post_rx_frags(rxo, GFP_KERNEL);
-               }
-       }
-
-reschedule:
-       adapter->work_counter++;
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
 static void be_msix_disable(struct be_adapter *adapter)
 {
        if (msix_enabled(adapter)) {
@@ -2119,27 +2142,28 @@ done:
 static int be_sriov_enable(struct be_adapter *adapter)
 {
        be_check_sriov_fn_type(adapter);
+
 #ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status, pos;
-               u16 nvfs;
+               u16 dev_vfs;
 
                pos = pci_find_ext_capability(adapter->pdev,
                                                PCI_EXT_CAP_ID_SRIOV);
                pci_read_config_word(adapter->pdev,
-                                       pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+                                    pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
 
-               if (num_vfs > nvfs) {
+               adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+               if (adapter->num_vfs != num_vfs)
                        dev_info(&adapter->pdev->dev,
-                                       "Device supports %d VFs and not %d\n",
-                                       nvfs, num_vfs);
-                       num_vfs = nvfs;
-               }
+                                "Device supports %d VFs and not %d\n",
+                                adapter->num_vfs, num_vfs);
 
-               status = pci_enable_sriov(adapter->pdev, num_vfs);
-               adapter->sriov_enabled = status ? false : true;
+               status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+               if (status)
+                       adapter->num_vfs = 0;
 
-               if (adapter->sriov_enabled) {
+               if (adapter->num_vfs) {
                        adapter->vf_cfg = kcalloc(num_vfs,
                                                sizeof(struct be_vf_cfg),
                                                GFP_KERNEL);
@@ -2154,10 +2178,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
 static void be_sriov_disable(struct be_adapter *adapter)
 {
 #ifdef CONFIG_PCI_IOV
-       if (adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                pci_disable_sriov(adapter->pdev);
                kfree(adapter->vf_cfg);
-               adapter->sriov_enabled = false;
+               adapter->num_vfs = 0;
        }
 #endif
 }
@@ -2351,8 +2375,8 @@ static int be_close(struct net_device *netdev)
 static int be_rx_queues_setup(struct be_adapter *adapter)
 {
        struct be_rx_obj *rxo;
-       int rc, i;
-       u8 rsstable[MAX_RSS_QS];
+       int rc, i, j;
+       u8 rsstable[128];
 
        for_all_rx_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2388,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
        }
 
        if (be_multi_rxq(adapter)) {
-               for_all_rss_queues(adapter, rxo, i)
-                       rsstable[i] = rxo->rss_id;
+               for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+                       for_all_rss_queues(adapter, rxo, i) {
+                               if ((j + i) >= 128)
+                                       break;
+                               rsstable[j + i] = rxo->rss_id;
+                       }
+               }
+               rc = be_cmd_rss_config(adapter, rsstable, 128);
 
-               rc = be_cmd_rss_config(adapter, rsstable,
-                       adapter->num_rx_qs - 1);
                if (rc)
                        return rc;
        }
@@ -2386,6 +2414,7 @@ static int be_open(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_rx_obj *rxo;
+       u8 link_status;
        int status, i;
 
        status = be_rx_queues_setup(adapter);
@@ -2409,6 +2438,11 @@ static int be_open(struct net_device *netdev)
        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);
 
+       status = be_cmd_link_status_query(adapter, NULL, NULL,
+                                         &link_status, 0);
+       if (!status)
+               be_link_status_update(adapter, link_status);
+
        return 0;
 err:
        be_close(adapter->netdev);
@@ -2465,19 +2499,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
+       struct be_vf_cfg *vf_cfg;
 
        be_vf_eth_addr_generate(adapter, mac);
 
-       for (vf = 0; vf < num_vfs; vf++) {
-               status = be_cmd_pmac_add(adapter, mac,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       &adapter->vf_cfg[vf].vf_pmac_id,
-                                       vf + 1);
+       for_all_vfs(adapter, vf_cfg, vf) {
+               if (lancer_chip(adapter)) {
+                       status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
+               } else {
+                       status = be_cmd_pmac_add(adapter, mac,
+                                                vf_cfg->if_handle,
+                                                &vf_cfg->pmac_id, vf + 1);
+               }
+
                if (status)
                        dev_err(&adapter->pdev->dev,
-                               "Mac address add failed for VF %d\n", vf);
+                       "Mac address assignment failed for VF %d\n", vf);
                else
-                       memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+                       memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
                mac[5] += 1;
        }
@@ -2486,24 +2525,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
 
 static void be_vf_clear(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 vf;
 
-       for (vf = 0; vf < num_vfs; vf++) {
-               if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-                       be_cmd_pmac_del(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-       }
+       for_all_vfs(adapter, vf_cfg, vf) {
+               if (lancer_chip(adapter))
+                       be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+               else
+                       be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                       vf_cfg->pmac_id, vf + 1);
 
-       for (vf = 0; vf < num_vfs; vf++)
-               if (adapter->vf_cfg[vf].vf_if_handle)
-                       be_cmd_if_destroy(adapter,
-                               adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+               be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+       }
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-       if (be_physfn(adapter) && adapter->sriov_enabled)
+       if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
@@ -2511,61 +2549,94 @@ static int be_clear(struct be_adapter *adapter)
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
-       adapter->eq_next_idx = 0;
-
-       adapter->be3_native = false;
-       adapter->promiscuous = false;
 
        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
 }
 
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+       struct be_vf_cfg *vf_cfg;
+       int vf;
+
+       for_all_vfs(adapter, vf_cfg, vf) {
+               vf_cfg->if_handle = -1;
+               vf_cfg->pmac_id = -1;
+       }
+}
+
 static int be_vf_setup(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 cap_flags, en_flags, vf;
        u16 lnk_speed;
        int status;
 
-       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
-       for (vf = 0; vf < num_vfs; vf++) {
+       be_vf_setup_init(adapter);
+
+       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                               BE_IF_FLAGS_MULTICAST;
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
-                                       &adapter->vf_cfg[vf].vf_if_handle,
-                                       NULL, vf+1);
+                                         &vf_cfg->if_handle, NULL, vf + 1);
                if (status)
                        goto err;
-               adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
        }
 
-       if (!lancer_chip(adapter)) {
-               status = be_vf_eth_addr_config(adapter);
-               if (status)
-                       goto err;
-       }
+       status = be_vf_eth_addr_config(adapter);
+       if (status)
+               goto err;
 
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                               vf + 1);
+                                                 NULL, vf + 1);
                if (status)
                        goto err;
-               adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+               vf_cfg->tx_rate = lnk_speed * 10;
        }
        return 0;
 err:
        return status;
 }
 
+static void be_setup_init(struct be_adapter *adapter)
+{
+       adapter->vlan_prio_bmap = 0xff;
+       adapter->link_speed = -1;
+       adapter->if_handle = -1;
+       adapter->be3_native = false;
+       adapter->promiscuous = false;
+       adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+       u32 pmac_id;
+       int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+       if (status != 0)
+               goto do_none;
+       status = be_cmd_mac_addr_query(adapter, mac,
+                       MAC_ADDRESS_TYPE_NETWORK,
+                       false, adapter->if_handle, pmac_id);
+       if (status != 0)
+               goto do_none;
+       status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+                       &adapter->pmac_id, 0);
+do_none:
+       return status;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags;
        u32 tx_fc, rx_fc;
-       int status;
+       int status, i;
        u8 mac[ETH_ALEN];
+       struct be_tx_obj *txo;
 
-       /* Allow all priorities by default. A GRP5 evt may modify this */
-       adapter->vlan_prio_bmap = 0xff;
-       adapter->link_speed = -1;
+       be_setup_init(adapter);
 
        be_cmd_req_native_mode(adapter);
 
@@ -2583,7 +2654,7 @@ static int be_setup(struct be_adapter *adapter)
 
        memset(mac, 0, ETH_ALEN);
        status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
-                       true /*permanent */, 0);
+                       true /*permanent */, 0, 0);
        if (status)
                return status;
        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2663,8 @@ static int be_setup(struct be_adapter *adapter)
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
        cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
-                       BE_IF_FLAGS_PROMISCUOUS;
+                       BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
                cap_flags |= BE_IF_FLAGS_RSS;
                en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2675,23 @@ static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto err;
 
-       /* For BEx, the VF's permanent mac queried from card is incorrect.
-        * Query the mac configued by the PF using if_handle
-        */
-       if (!be_physfn(adapter) && !lancer_chip(adapter)) {
-               status = be_cmd_mac_addr_query(adapter, mac,
-                       MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+        for_all_tx_queues(adapter, txo, i) {
+               status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+               if (status)
+                       goto err;
+       }
+
+        /* The VF's permanent mac queried from card is incorrect.
+         * For BEx: Query the mac configued by the PF using if_handle
+         * For Lancer: Get and use mac_list to obtain mac address.
+         */
+       if (!be_physfn(adapter)) {
+               if (lancer_chip(adapter))
+                       status = be_configure_mac_from_list(adapter, mac);
+               else
+                       status = be_cmd_mac_addr_query(adapter, mac,
+                                       MAC_ADDRESS_TYPE_NETWORK, false,
+                                       adapter->if_handle, 0);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2707,21 @@ static int be_setup(struct be_adapter *adapter)
        be_set_rx_mode(adapter->netdev);
 
        status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
-       if (status)
+       /* For Lancer: It is legal for this cmd to fail on VF */
+       if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
                goto err;
+
        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
                status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
-               if (status)
+               /* For Lancer: It is legal for this cmd to fail on VF */
+               if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
                        goto err;
        }
 
        pcie_set_readrq(adapter->pdev, 4096);
 
-       if (be_physfn(adapter) && adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                status = be_vf_setup(adapter);
                if (status)
                        goto err;
@@ -2647,6 +2733,19 @@ err:
        return status;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_rx_obj *rxo;
+       int i;
+
+       event_handle(adapter, &adapter->tx_eq, false);
+       for_all_rx_queues(adapter, rxo, i)
+               event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
@@ -2995,7 +3094,10 @@ static struct net_device_ops be_netdev_ops = {
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
-       .ndo_get_vf_config      = be_get_vf_config
+       .ndo_get_vf_config      = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = be_netpoll,
+#endif
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3344,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
+       case OC_DEVICE_ID5:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
@@ -3267,7 +3370,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
 
 static int lancer_wait_ready(struct be_adapter *adapter)
 {
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
        u32 sliport_status;
        int status = 0, i;
 
@@ -3276,7 +3379,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;
 
-               msleep(20);
+               msleep(1000);
        }
 
        if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3416,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
        return status;
 }
 
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+       int status;
+       u32 sliport_status;
+
+       if (adapter->eeh_err || adapter->ue_detected)
+               return;
+
+       sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+               dev_err(&adapter->pdev->dev,
+                               "Adapter in error state."
+                               "Trying to recover.\n");
+
+               status = lancer_test_and_set_rdy_state(adapter);
+               if (status)
+                       goto err;
+
+               netif_device_detach(adapter->netdev);
+
+               if (netif_running(adapter->netdev))
+                       be_close(adapter->netdev);
+
+               be_clear(adapter);
+
+               adapter->fw_timeout = false;
+
+               status = be_setup(adapter);
+               if (status)
+                       goto err;
+
+               if (netif_running(adapter->netdev)) {
+                       status = be_open(adapter->netdev);
+                       if (status)
+                               goto err;
+               }
+
+               netif_device_attach(adapter->netdev);
+
+               dev_err(&adapter->pdev->dev,
+                               "Adapter error recovery succeeded\n");
+       }
+       return;
+err:
+       dev_err(&adapter->pdev->dev,
+                       "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+       struct be_adapter *adapter =
+               container_of(work, struct be_adapter, work.work);
+       struct be_rx_obj *rxo;
+       int i;
+
+       if (lancer_chip(adapter))
+               lancer_test_and_recover_fn_err(adapter);
+
+       be_detect_dump_ue(adapter);
+
+       /* when interrupts are not yet enabled, just reap any pending
+       * mcc completions */
+       if (!netif_running(adapter->netdev)) {
+               int mcc_compl, status = 0;
+
+               mcc_compl = be_process_mcc(adapter, &status);
+
+               if (mcc_compl) {
+                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+               }
+
+               goto reschedule;
+       }
+
+       if (!adapter->stats_cmd_sent) {
+               if (lancer_chip(adapter))
+                       lancer_cmd_get_pport_stats(adapter,
+                                               &adapter->stats_cmd);
+               else
+                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
+       }
+
+       for_all_rx_queues(adapter, rxo, i) {
+               be_rx_eqd_update(adapter, rxo);
+
+               if (rxo->rx_post_starved) {
+                       rxo->rx_post_starved = false;
+                       be_post_rx_frags(rxo, GFP_KERNEL);
+               }
+       }
+
+reschedule:
+       adapter->work_counter++;
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -3365,7 +3566,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
                goto disable_sriov;
 
        if (lancer_chip(adapter)) {
-               status = lancer_test_and_set_rdy_state(adapter);
+               status = lancer_wait_ready(adapter);
+               if (!status) {
+                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                                       adapter->db + SLIPORT_CONTROL_OFFSET);
+                       status = lancer_test_and_set_rdy_state(adapter);
+               }
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non recoverable error\n");
                        goto ctrl_clean;
@@ -3559,6 +3765,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
 
        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;
+       adapter->ue_detected = false;
+       adapter->fw_timeout = false;
 
        status = pci_enable_device(pdev);
        if (status)
index 251b635fe75a1a767ff76a21aad8c243dbf21f3c..60f0e788cc256c9887361f28c078b3c20ce64a3a 100644 (file)
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
        },
 };
 
-static int __init ethoc_init(void)
-{
-       return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
-       platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
 
 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
 MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
index 61d2bddec1fa9e6c38fc5521a3962647e159f4e0..c82d444b582d52533dbbd8072ceccc0613b1a12c 100644 (file)
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
 {
        struct netdev_private *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index c520cfd3b29805440508acff0a8748b5956fee11..3574e1499dfc30db059160e80b8a77766cd8392f 100644 (file)
@@ -21,9 +21,10 @@ config NET_VENDOR_FREESCALE
 if NET_VENDOR_FREESCALE
 
 config FEC
-       bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+       tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
-                  ARCH_MXC || ARCH_MXS)
+                  ARCH_MXC || SOC_IMX28)
+       default ARCH_MXC || SOC_IMX28 if ARM
        select PHYLIB
        ---help---
          Say Y here if you want to use the built-in 10/100 Fast ethernet
index 1124ce0a15944a36dd119efae6fe47a101264b72..b0b04453c7cc16f6f1465af8a27e3d3084f16c7f 100644 (file)
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
 MODULE_DEVICE_TABLE(platform, fec_devtype);
 
 enum imx_fec_type {
-       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
+       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #elif defined (CONFIG_M5272C3)
 #define        FEC_FLASHMAC    (0xffe04000 + 4)
 #elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC   0xffc0406b
+#define FEC_FLASHMAC   0xffc0406b
 #else
 #define        FEC_FLASHMAC    0
 #endif
@@ -232,6 +232,7 @@ struct fec_enet_private {
        struct  platform_device *pdev;
 
        int     opened;
+       int     dev_id;
 
        /* Phylib and MDIO interface */
        struct  mii_bus *mii_bus;
@@ -254,11 +255,13 @@ struct fec_enet_private {
 #define FEC_MMFR_TA            (2 << 16)
 #define FEC_MMFR_DATA(v)       (v & 0xffff)
 
-#define FEC_MII_TIMEOUT                1000 /* us */
+#define FEC_MII_TIMEOUT                30000 /* us */
 
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static int mii_cnt;
+
 static void *swap_buffer(void *bufaddr, int len)
 {
        int i;
@@ -515,6 +518,7 @@ fec_stop(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
+       u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -531,8 +535,10 @@ fec_stop(struct net_device *ndev)
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        /* We have to keep ENET enabled to have MII interrupt stay working */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                writel(2, fep->hwp + FEC_ECNTRL);
+               writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+       }
 }
 
 
@@ -818,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
                        iap = (unsigned char *)FEC_FLASHMAC;
 #else
                if (pdata)
-                       memcpy(iap, pdata->mac, ETH_ALEN);
+                       iap = (unsigned char *)&pdata->mac;
 #endif
        }
 
@@ -837,7 +843,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
 
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
-                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -865,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
                        fec_restart(ndev, phy_dev->duplex);
+                       /* prevent unnecessary second fec_restart() below */
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
@@ -953,7 +961,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
        int phy_id;
-       int dev_id = fep->pdev->id;
+       int dev_id = fep->dev_id;
 
        fep->phy_dev = NULL;
 
@@ -972,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        }
 
        if (phy_id >= PHY_MAX_ADDR) {
-               printk(KERN_INFO "%s: no PHY, assuming direct connection "
-                       "to switch\n", ndev->name);
+               printk(KERN_INFO
+                       "%s: no PHY, assuming direct connection to switch\n",
+                       ndev->name);
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
@@ -998,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        fep->link = 0;
        fep->full_duplex = 0;
 
-       printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-               "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+       printk(KERN_INFO
+               "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+               ndev->name,
                fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                fep->phy_dev->irq);
 
@@ -1031,10 +1041,14 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
-               fep->mii_bus = fec0_mii_bus;
-               return 0;
+               if (mii_cnt && fec0_mii_bus) {
+                       fep->mii_bus = fec0_mii_bus;
+                       mii_cnt++;
+                       return 0;
+               }
+               return -ENOENT;
        }
 
        fep->mii_timeout = 0;
@@ -1063,7 +1077,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
        fep->mii_bus->reset = fec_enet_mdio_reset;
-       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
        fep->mii_bus->priv = fep;
        fep->mii_bus->parent = &pdev->dev;
 
@@ -1079,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;
 
+       mii_cnt++;
+
        /* save fec0 mii_bus */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1111,11 @@ err_out:
 
 static void fec_enet_mii_remove(struct fec_enet_private *fep)
 {
-       if (fep->phy_dev)
-               phy_disconnect(fep->phy_dev);
-       mdiobus_unregister(fep->mii_bus);
-       kfree(fep->mii_bus->irq);
-       mdiobus_free(fep->mii_bus);
+       if (--mii_cnt == 0) {
+               mdiobus_unregister(fep->mii_bus);
+               kfree(fep->mii_bus->irq);
+               mdiobus_free(fep->mii_bus);
+       }
 }
 
 static int fec_enet_get_settings(struct net_device *ndev,
@@ -1521,6 +1537,7 @@ fec_probe(struct platform_device *pdev)
        int i, irq, ret = 0;
        struct resource *r;
        const struct of_device_id *of_id;
+       static int dev_id;
 
        of_id = of_match_device(fec_dt_ids, &pdev->dev);
        if (of_id)
@@ -1548,6 +1565,7 @@ fec_probe(struct platform_device *pdev)
 
        fep->hwp = ioremap(r->start, resource_size(r));
        fep->pdev = pdev;
+       fep->dev_id = dev_id++;
 
        if (!fep->hwp) {
                ret = -ENOMEM;
@@ -1571,8 +1589,12 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < FEC_IRQ_NUM; i++) {
                irq = platform_get_irq(pdev, i);
-               if (i && irq < 0)
-                       break;
+               if (irq < 0) {
+                       if (i)
+                               break;
+                       ret = irq;
+                       goto failed_irq;
+               }
                ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
                if (ret) {
                        while (--i >= 0) {
@@ -1583,7 +1605,7 @@ fec_probe(struct platform_device *pdev)
                }
        }
 
-       fep->clk = clk_get(&pdev->dev, "fec_clk");
+       fep->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(fep->clk)) {
                ret = PTR_ERR(fep->clk);
                goto failed_clk;
@@ -1635,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct resource *r;
+       int i;
 
-       fec_stop(ndev);
+       unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
+       for (i = 0; i < FEC_IRQ_NUM; i++) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq > 0)
+                       free_irq(irq, ndev);
+       }
        clk_disable(fep->clk);
        clk_put(fep->clk);
        iounmap(fep->hwp);
-       unregister_netdev(ndev);
        free_netdev(ndev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 5bf5471f06ffd77bf82e9d5c91a90ebb75a38216..910a8e18a9ae435807af5aa91812bba14b99d82c 100644 (file)
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
        .remove = fs_enet_remove,
 };
 
-static int __init fs_init(void)
-{
-       return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
-       platform_driver_unregister(&fs_enet_driver);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void fs_enet_netpoll(struct net_device *dev)
 {
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
 }
 #endif
 
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
index b09270b5d0a56fe484ac0d5a6a77ddaf995adfec..0f2d1a710909e5044b3c2c402bf2aba0b950ff88 100644 (file)
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
        .remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_bb_init(void)
-{
-       return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
-       platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
index e0e9d6c35d83f61dee22ae9c4c8d54d82e6e538e..55bb867258e6aa1d2baa1dbf4da546c705a2e83f 100644 (file)
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
        .remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_fec_init(void)
-{
-       return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
-       platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
index 52f4e8ad48e77c84b8bd4ed9cd52faa3f5adb419..9eb815941df56a6b207d6211c3d09ae272224dd9 100644 (file)
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
-       int i;
-
-       for (i = PHY_MAX_ADDR; i > 0; i--) {
-               u32 phy_id;
-
-               if (get_phy_id(new_bus, i, &phy_id))
-                       return -1;
-
-               if (phy_id == 0xffffffff)
-                       break;
-       }
 
-       return i;
-}
-
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
        struct gfar __iomem *enet_regs;
 
        /*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
        } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
                        of_device_is_compatible(np, "fsl,etsec2-tbi")) {
                return of_iomap(np, 1);
-       } else
-               return NULL;
-}
+       }
 #endif
+       return NULL;
+}
 
 
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 {
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
        struct device_node *np = NULL;
        int err = 0;
 
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
                return err;
        else
                return -EINVAL;
-}
+#else
+       return -ENODEV;
 #endif
-
+}
 
 static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 {
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
                        of_device_is_compatible(np, "fsl,etsec2-mdio") ||
                        of_device_is_compatible(np, "fsl,etsec2-tbi") ||
                        of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
                tbipa = get_gfar_tbipa(regs, np);
                if (!tbipa) {
                        err = -EINVAL;
                        goto err_free_irqs;
                }
-#else
-               err = -ENODEV;
-               goto err_free_irqs;
-#endif
        } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
                        of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
                u32 id;
                static u32 mii_mng_master;
 
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
                        mii_mng_master = id;
                        ucc_set_qe_mux_mii_mng(id - 1);
                }
-#else
-               err = -ENODEV;
-               goto err_free_irqs;
-#endif
        } else {
                err = -ENODEV;
                goto err_free_irqs;
@@ -386,23 +359,12 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
        }
 
        if (tbiaddr == -1) {
-               out_be32(tbipa, 0);
-
-               tbiaddr = fsl_pq_mdio_find_free(new_bus);
-       }
-
-       /*
-        * We define TBIPA at 0 to be illegal, opting to fail for boards that
-        * have PHYs at 1-31, rather than change tbipa and rescan.
-        */
-       if (tbiaddr == 0) {
                err = -EBUSY;
-
                goto err_free_irqs;
+       } else {
+               out_be32(tbipa, tbiaddr);
        }
 
-       out_be32(tbipa, tbiaddr);
-
        err = of_mdiobus_register(new_bus, np);
        if (err) {
                printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -480,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
        .remove = fsl_pq_mdio_remove,
 };
 
-int __init fsl_pq_mdio_init(void)
-{
-       return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
 
-void fsl_pq_mdio_exit(void)
-{
-       platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
 MODULE_LICENSE("GPL");
index 83199fd0d62b14df6e13ae5403feecdf4eb35af8..e01cdaa722a977eac9b6c66513dfe13a44b97a44 100644 (file)
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
@@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
 }
 
 /* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
@@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev)
 static void gfar_clear_exact_match(struct net_device *dev)
 {
        int idx;
-       static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+       static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
        for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
                gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 {
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
-       u32 result = ether_crc(MAC_ADDR_LEN, addr);
+       u32 result = ether_crc(ETH_ALEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int idx;
-       char tmpbuf[MAC_ADDR_LEN];
+       char tmpbuf[ETH_ALEN];
        u32 tempval;
        u32 __iomem *macptr = &regs->macstnaddr1;
 
@@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
        /* Now copy it into the mac registers backwards, cuz */
        /* little endian is silly */
-       for (idx = 0; idx < MAC_ADDR_LEN; idx++)
-               tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+       for (idx = 0; idx < ETH_ALEN; idx++)
+               tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
 
        gfar_write(macptr, *((u32 *) (tmpbuf)));
 
@@ -3281,16 +3281,4 @@ static struct platform_driver gfar_driver = {
        .remove = gfar_remove,
 };
 
-static int __init gfar_init(void)
-{
-       return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
-       platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
index 9aa43773e8e35047da95bad312d9027650714d65..fe7ac3a83194d8407f191227e710fe19a699e13a 100644 (file)
@@ -74,9 +74,6 @@ struct ethtool_rx_list {
  * will be the next highest multiple of 512 bytes. */
 #define INCREMENTAL_BUFFER_SIZE 512
 
-
-#define MAC_ADDR_LEN 6
-
 #define PHY_INIT_TIMEOUT 100000
 #define GFAR_PHY_CHANGE_TIME 2
 
@@ -1179,9 +1176,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
 extern void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
 extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 212736bab6bb27ac8a261b9d97d0c0415e978ef2..5a3b2e5b28802d67028212996974bf04d7fd8cca 100644 (file)
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
        return err;
 }
 
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        int err = 0, i = 0;
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
                gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
 
        /* We need a copy of the filer table because
         * we want to change its order */
-       temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
        if (temp_table == NULL)
                return -ENOMEM;
-       memcpy(temp_table, tab, sizeof(*temp_table));
 
        mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
                        sizeof(struct gfar_mask_entry), GFP_KERNEL);
@@ -1693,8 +1692,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
                ret = gfar_set_hash_opts(priv, cmd);
                break;
        case ETHTOOL_SRXCLSRLINS:
-               if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
-                       cmd->fs.ring_cookie >= priv->num_rx_queues) {
+               if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+                    cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+                   cmd->fs.location >= MAX_FILER_IDX) {
                        ret = -EINVAL;
                        break;
                }
index f67b8aebc89c3d71fe30c41bd1dff1265a7c1fb1..83e0ed757e338aa14a453d2800e31c8efe9eab5a 100644 (file)
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
        .remove      = gianfar_ptp_remove,
 };
 
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
-       return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
-       platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
 
 MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
 MODULE_DESCRIPTION("PTP clock using the eTSEC");
index b5dc0273a1d1d63f213096dfd3f1057006d0b6e2..ba2dc083bfc007fa416e3c714dd1e6350d59e99b 100644 (file)
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
 
 static inline int compare_addr(u8 **addr1, u8 **addr2)
 {
-       return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+       return memcmp(addr1, addr2, ETH_ALEN);
 }
 
 #ifdef DEBUG
index d12fcad145e9b2f0765caa7195e033a1adb43647..2e395a2566b8d6221747f79828cbdfb70f72947c 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/if_ether.h>
 
 #include <asm/immap_qe.h>
 #include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
 #define TX_RING_MOD_MASK(size)                  (size-1)
 #define RX_RING_MOD_MASK(size)                  (size-1)
 
-#define ENET_NUM_OCTETS_PER_ADDRESS             6
 #define ENET_GROUP_ADDR                         0x01   /* Group address mask
                                                           for ethernet
                                                           addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
 
 /* UCC GETH 82xx Ethernet Address Container */
 struct enet_addr_container {
-       u8 address[ENET_NUM_OCTETS_PER_ADDRESS];        /* ethernet address */
+       u8 address[ETH_ALEN];   /* ethernet address */
        enum ucc_geth_enet_address_recognition_location location;       /* location in
                                                                   82xx address
                                                                   recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
        u16 cpucount[NUM_TX_QUEUES];
        u16 __iomem *p_cpucount[NUM_TX_QUEUES];
        int indAddrRegUsed[NUM_OF_PADDRS];
-       u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS];   /* ethernet address */
+       u8 paddr[NUM_OF_PADDRS][ETH_ALEN];      /* ethernet address */
        u8 numGroupAddrInHash;
        u8 numIndAddrInHash;
        u8 numIndAddrInReg;
index 15416752c13e0646ccfc3eac1435671dc9f4ac25..ee84b472cee60c377556495ff54504e8a4f6b7f8 100644 (file)
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
index 067c46069a113aed9e8c9ac7220e3e6516599b97..114cda7721fe5d155a46045894b386efa850c545 100644 (file)
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
 static void eepro_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *drvinfo)
 {
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->version, DRV_VERSION);
-       sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+               "ISA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops eepro_ethtool_ops = {
index 410d6a1984ed400863c7e5eabd63e7041629d101..6650068c996c48158c3b09c8975b912440e00aff 100644 (file)
@@ -61,9 +61,9 @@
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT      1023
 #define EHEA_DEF_ENTRIES_SQ     1023
-#define EHEA_DEF_ENTRIES_RQ1    4095
+#define EHEA_DEF_ENTRIES_RQ1    1023
 #define EHEA_DEF_ENTRIES_RQ2    1023
-#define EHEA_DEF_ENTRIES_RQ3    1023
+#define EHEA_DEF_ENTRIES_RQ3    511
 #else
 #define EHEA_MAX_CQE_COUNT      4080
 #define EHEA_DEF_ENTRIES_SQ     4080
index 37b70f7052b68ad9a16593cb299cf6eb380a6491..3554414eb5e289287e3a9ab174320c7e9fa1bf30 100644 (file)
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
        free_page((unsigned long)cb2);
 resched:
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2113,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2131,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2139,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
-       return;
+       return err;
 }
 
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2164,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2172,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
+       return err;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2434,7 +2446,8 @@ static int ehea_open(struct net_device *dev)
        }
 
        mutex_unlock(&port->port_lock);
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 
        return ret;
 }
index ed79b2d3ad3ebaee10cf400606481dc3a3ea5a4e..2abce965c7bdf81105238bdc890581141b3ecb85 100644 (file)
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);
 
+       busy_phy_map &= ~(1 << dev->phy.address);
+       DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
        mal_unregister_commac(dev->mal, &dev->commac);
        emac_put_deps(dev);
 
index b1cd41b9c61ca0e970adc2285a845505c8daf7e9..e877371680a9b1417f53b7b4c73f770b4e6c3d43 100644 (file)
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
                sizeof(info->version) - 1);
 }
 
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        return rc1 ? rc1 : rc2;
 }
 
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
index 4326681df382ac461c24e21eb6a6ea0ee64d4426..acc31af6594a243b91d7c9f666363581f68ddf05 100644 (file)
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 
                /* FIXME: do we need this? */
                memset(local_list, 0, sizeof(local_list));
-               memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+               memset(remote_list, 0, sizeof(remote_list));
 
                /* a 0 address marks the end of the valid entries */
                if (senddata->addr[startchunk] == 0)
index 8fd80a00b898333af18d76aa0c44bc4f22b3b8f5..075451d0207d8d8e6c7af10272a6df61785aa540 100644 (file)
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
        }
 
        /* The last cycle is a tri-state, so read from the PHY. */
-       for (j = 7; j < 8; j++) {
-               for (i = 0; i < p[j].len; i++) {
-                       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
-                       p[j].field |= ((ipg_r8(PHY_CTRL) &
-                               IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
-                       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
-               }
-       }
+       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+       ipg_r8(PHY_CTRL);
+       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
 }
 
 static void ipg_set_led_mode(struct net_device *dev)
index 5a2fdf7a00c8bbbe9a8f9f7a3a7abf8fd1be180f..9436397e5725ecd6efdd735ac579ff9fa268e131 100644 (file)
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *info)
 {
        struct nic *nic = netdev_priv(netdev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(nic->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(nic->pdev),
+               sizeof(info->bus_info));
 }
 
 #define E100_PHY_REGS 0x1C
index 2b223ac99c421c9a6413e06f996ca4efc2f45a7a..3103f0b6bf5ef02d0e6a980a2fbd4806a897e7d3 100644 (file)
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
 
-       strncpy(drvinfo->driver,  e1000_driver_name, 32);
-       strncpy(drvinfo->version, e1000_driver_version, 32);
+       strlcpy(drvinfo->driver,  e1000_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, e1000_driver_version,
+               sizeof(drvinfo->version));
 
-       sprintf(firmware_version, "N/A");
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = e1000_get_regs_len(netdev);
        drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
index 5c9a8403668b87e1bc711e654f3dad6be6b0854c..cf7e3c09447757573342aea7715993d0e8db1c60 100644 (file)
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
 #define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
 
 /* MAC decode size is 128K - This is the size of BAR0 */
 #define MAC_DECODE_SIZE (128 * 1024)
index cf480b55462273d51d4272d61af4e5521d53a379..985d58943a0624a816cdc7dc3f987b5503a853d0 100644 (file)
@@ -167,9 +167,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
 
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+                           netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
 #ifdef CONFIG_PM
@@ -806,7 +807,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
        }
 }
 
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -820,10 +822,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                e1000_vlan_mode(netdev, features);
@@ -1182,7 +1185,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
-                       adapter->quad_port_a = 1;
+                       adapter->quad_port_a = true;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
@@ -1676,7 +1679,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
         * need this to apply a workaround later in the send path. */
        if (hw->mac_type == e1000_82544 &&
            hw->bus_type == e1000_bus_type_pcix)
-               adapter->pcix_82544 = 1;
+               adapter->pcix_82544 = true;
 
        ew32(TCTL, tctl);
 
@@ -1999,7 +2002,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       tx_ring->last_tx_tso = 0;
+       tx_ring->last_tx_tso = false;
 
        writel(0, hw->hw_addr + tx_ring->tdh);
        writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2851,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                 * DMA'd to the controller */
                if (!skb->data_len && tx_ring->last_tx_tso &&
                    !skb_is_gso(skb)) {
-                       tx_ring->last_tx_tso = 0;
+                       tx_ring->last_tx_tso = false;
                        size -= 4;
                }
 
@@ -3216,7 +3219,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        if (likely(tso)) {
                if (likely(hw->mac_type != e1000_82544))
-                       tx_ring->last_tx_tso = 1;
+                       tx_ring->last_tx_tso = true;
                tx_flags |= E1000_TX_FLAGS_TSO;
        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4580,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
                e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4604,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
                e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4613,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((hw->mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4625,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        e1000_write_vfta(hw, index, vfta);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4650,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, false);
+
+       return 0;
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
index 9fe18d1d53d8eaf34f4b2825c5a7fd9bc2c14b2b..f478a22ed57768e1f1b88207236a7b673e8b20f7 100644 (file)
@@ -309,6 +309,7 @@ struct e1000_adapter {
        u32 txd_cmd;
 
        bool detect_tx_hung;
+       bool tx_hang_recheck;
        u8 tx_timeout_factor;
 
        u32 tx_int_delay;
index 69c9d2199140a94b4f754bdc1fc43d93bd00b6ea..fb2c28e799a2d6ad80ef5eeb88d2fb6e058ec866 100644 (file)
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
 
-       strncpy(drvinfo->driver,  e1000e_driver_name,
-               sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, e1000e_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver,  e1000e_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, e1000e_driver_version,
+               sizeof(drvinfo->version));
 
        /*
         * EEPROM image version # is reported as firmware version # for
         * PCI-E controllers
         */
-       snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d-%d",
                (adapter->eeprom_vers & 0xF000) >> 12,
                (adapter->eeprom_vers & 0x0FF0) >> 4,
                (adapter->eeprom_vers & 0x000F));
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = e1000_get_regs_len(netdev);
        drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
index a855db1ad2495b9c682191b36e82a5b875a981ab..3911401ed65d59ceafe9b28080a6630f4adf6371 100644 (file)
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
                        regs[n] = __er32(hw, E1000_TARC(n));
                break;
        default:
-               printk(KERN_INFO "%-15s %08x\n",
-                      reginfo->name, __er32(hw, reginfo->ofs));
+               pr_info("%-15s %08x\n",
+                       reginfo->name, __er32(hw, reginfo->ofs));
                return;
        }
 
        snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
-       printk(KERN_INFO "%-15s ", rname);
-       for (n = 0; n < 2; n++)
-               printk(KERN_CONT "%08x ", regs[n]);
-       printk(KERN_CONT "\n");
+       pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
 /*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               printk(KERN_INFO "Device Name     state            "
-                      "trans_start      last_rx\n");
-               printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-                      netdev->name, netdev->state, netdev->trans_start,
-                      netdev->last_rx);
+               pr_info("Device Name     state            trans_start      last_rx\n");
+               pr_info("%-15s %016lX %016lX %016lX\n",
+                       netdev->name, netdev->state, netdev->trans_start,
+                       netdev->last_rx);
        }
 
        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
-       printk(KERN_INFO " Register Name   Value\n");
+       pr_info(" Register Name   Value\n");
        for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
             reginfo->name; reginfo++) {
                e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
                goto exit;
 
        dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-              " leng ntw timestamp\n");
+       pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
-       printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-              0, tx_ring->next_to_use, tx_ring->next_to_clean,
-              (unsigned long long)buffer_info->dma,
-              buffer_info->length,
-              buffer_info->next_to_watch,
-              (unsigned long long)buffer_info->time_stamp);
+       pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+               0, tx_ring->next_to_use, tx_ring->next_to_clean,
+               (unsigned long long)buffer_info->dma,
+               buffer_info->length,
+               buffer_info->next_to_watch,
+               (unsigned long long)buffer_info->time_stamp);
 
        /* Print Tx Ring */
        if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
         *   +----------------------------------------------------------------+
         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
         */
-       printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Legacy format\n");
-       printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Ext Context format\n");
-       printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Ext Data format\n");
+       pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
+       pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
+       pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+               const char *next_desc;
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                buffer_info = &tx_ring->buffer_info[i];
                u0 = (struct my_u0 *)tx_desc;
-               printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-                      "%04X  %3X %016llX %p",
-                      (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
-                       ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
-                      (unsigned long long)le64_to_cpu(u0->a),
-                      (unsigned long long)le64_to_cpu(u0->b),
-                      (unsigned long long)buffer_info->dma,
-                      buffer_info->length, buffer_info->next_to_watch,
-                      (unsigned long long)buffer_info->time_stamp,
-                      buffer_info->skb);
                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
-                       printk(KERN_CONT " NTC/U\n");
+                       next_desc = " NTC/U";
                else if (i == tx_ring->next_to_use)
-                       printk(KERN_CONT " NTU\n");
+                       next_desc = " NTU";
                else if (i == tx_ring->next_to_clean)
-                       printk(KERN_CONT " NTC\n");
+                       next_desc = " NTC";
                else
-                       printk(KERN_CONT "\n");
+                       next_desc = "";
+               pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
+                       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+                        ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+                       i,
+                       (unsigned long long)le64_to_cpu(u0->a),
+                       (unsigned long long)le64_to_cpu(u0->b),
+                       (unsigned long long)buffer_info->dma,
+                       buffer_info->length, buffer_info->next_to_watch,
+                       (unsigned long long)buffer_info->time_stamp,
+                       buffer_info->skb, next_desc);
 
                if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
        /* Print Rx Ring Summary */
 rx_ring_summary:
        dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC]\n");
-       printk(KERN_INFO " %5d %5X %5X\n", 0,
-              rx_ring->next_to_use, rx_ring->next_to_clean);
+       pr_info("Queue [NTU] [NTC]\n");
+       pr_info(" %5d %5X %5X\n",
+               0, rx_ring->next_to_use, rx_ring->next_to_clean);
 
        /* Print Rx Ring */
        if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
                 * 24 |                Buffer Address 3 [63:0]              |
                 *    +-----------------------------------------------------+
                 */
-               printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-                      "[buffer 1 63:0 ] "
-                      "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
-                      "[bi->skb] <-- Ext Pkt Split format\n");
+               pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
                /* [Extended] Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31     13 12    8 7    4 3        0
@@ -352,35 +339,40 @@ rx_ring_summary:
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
-               printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-                      "[vl   l0 ee  es] "
-                      "[ l3  l2  l1 hs] [reserved      ] ---------------- "
-                      "[bi->skb] <-- Ext Rx Write-Back format\n");
+               pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc_ps;
                        staterr =
                            le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                      "%016llX %016llX %016llX "
-                                      "---------------- %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)le64_to_cpu(u1->c),
-                                      (unsigned long long)le64_to_cpu(u1->d),
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+                                       "RWB", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)le64_to_cpu(u1->c),
+                                       (unsigned long long)le64_to_cpu(u1->d),
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                      "%016llX %016llX %016llX %016llX %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)le64_to_cpu(u1->c),
-                                      (unsigned long long)le64_to_cpu(u1->d),
-                                      (unsigned long long)buffer_info->dma,
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
+                                       "R  ", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)le64_to_cpu(u1->c),
+                                       (unsigned long long)le64_to_cpu(u1->d),
+                                       (unsigned long long)buffer_info->dma,
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter))
                                        print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
                                                phys_to_virt(buffer_info->dma),
                                                adapter->rx_ps_bsize0, true);
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
                }
                break;
        default:
@@ -407,9 +392,7 @@ rx_ring_summary:
                 * 8 |                      Reserved                       |
                 *   +-----------------------------------------------------+
                 */
-               printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
-                      "[reserved 63:0 ] [bi->dma       ] "
-                      "[bi->skb] <-- Ext (Read) format\n");
+               pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
                /* Extended Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31    24 23            4 3        0
@@ -423,29 +406,37 @@ rx_ring_summary:
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
-               printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
-                      "[vt   ln xe  xs] "
-                      "[bi->skb] <-- Ext (Write-Back) format\n");
+               pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
+
                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                      "%016llX ---------------- %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
+                                       "RWB", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                      "%016llX %016llX %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)buffer_info->dma,
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
+                                       "R  ", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)buffer_info->dma,
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter))
                                        print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
                                                       adapter->rx_buffer_len,
                                                       true);
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
                }
        }
 
@@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
        u32 length, staterr;
        unsigned int i;
        int cleaned_count = 0;
-       bool cleaned = 0;
+       bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                next_buffer = &rx_ring->buffer_info[i];
 
-               cleaned = 1;
+               cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     print_hang_task);
+       struct net_device *netdev = adapter->netdev;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
 
+       if (!adapter->tx_hang_recheck &&
+           (adapter->flags2 & FLAG2_DMA_BURST)) {
+               /* May be block on write-back, flush and detect again
+                * flush pending descriptor writebacks to memory
+                */
+               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+               /* execute the writes immediately */
+               e1e_flush();
+               adapter->tx_hang_recheck = true;
+               return;
+       }
+       /* Real hang detected */
+       adapter->tx_hang_recheck = false;
+       netif_stop_queue(netdev);
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
+                               if (buffer_info->skb) {
+                                       bytes_compl += buffer_info->skb->len;
+                                       pkts_compl++;
+                               }
                        }
 
                        e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
        tx_ring->next_to_clean = i;
 
+       netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
        if (count && netif_carrier_ok(netdev) &&
            e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                 * Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i
                 */
-               adapter->detect_tx_hung = 0;
+               adapter->detect_tx_hung = false;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                               + (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                   !(er32(STATUS) & E1000_STATUS_TXOFF))
                        schedule_work(&adapter->print_hang_task);
-                       netif_stop_queue(netdev);
-               }
+               else
+                       adapter->tx_hang_recheck = false;
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
        unsigned int i, j;
        u32 length, staterr;
        int cleaned_count = 0;
-       bool cleaned = 0;
+       bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
                next_buffer = &rx_ring->buffer_info[i];
 
-               cleaned = 1;
+               cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        adapter->flags2 |= FLAG2_IS_DISCARDING;
 
                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
-                       e_dbg("Packet Split buffers didn't pick up the full "
-                             "packet\n");
+                       e_dbg("Packet Split buffers didn't pick up the full packet\n");
                        dev_kfree_skb_irq(skb);
                        if (staterr & E1000_RXD_STAT_EOP)
                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                length = le16_to_cpu(rx_desc->wb.middle.length0);
 
                if (!length) {
-                       e_dbg("Last part of the packet spanning multiple "
-                             "descriptors\n");
+                       e_dbg("Last part of the packet spanning multiple descriptors\n");
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                                        return;
                        }
                        /* MSI-X failed, so fall through and try MSI */
-                       e_err("Failed to initialize MSI-X interrupts.  "
-                             "Falling back to MSI interrupts.\n");
+                       e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
                        e1000e_reset_interrupt_capability(adapter);
                }
                adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                        adapter->flags |= FLAG_MSI_ENABLED;
                } else {
                        adapter->int_mode = E1000E_INT_MODE_LEGACY;
-                       e_err("Failed to initialize MSI interrupts.  Falling "
-                             "back to legacy interrupts.\n");
+                       e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
                }
                /* Fall through */
        case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
                e1000_put_txbuf(adapter, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
@@ -2518,7 +2522,7 @@ clean_rx:
        return work_done;
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        /* add VID to filter table */
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
            (vid == adapter->mng_vlan_id)) {
                /* release control to f/w */
                e1000e_release_hw_control(adapter);
-               return;
+               return 0;
        }
 
        /* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 }
 
 /**
- *  e1000_update_mc_addr_list - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       struct netdev_hw_addr *ha;
+       u8 *mta_list;
+       int i;
+
+       if (netdev_mc_empty(netdev)) {
+               /* nothing to program, so clear mc list */
+               hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+               return 0;
+       }
+
+       mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+       if (!mta_list)
+               return -ENOMEM;
+
+       /* update_mc_addr_list expects a packed array of only addresses. */
+       i = 0;
+       netdev_for_each_mc_addr(ha, netdev)
+               memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+       hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+       kfree(mta_list);
+
+       return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
  *
- *  Updates the Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
  **/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                                     u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
 {
-       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int rar_entries = hw->mac.rar_entry_count;
+       int count = 0;
+
+       /* save a rar entry for our hardware address */
+       rar_entries--;
+
+       /* save a rar entry for the LAA workaround */
+       if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+               rar_entries--;
+
+       /* return ENOMEM indicating insufficient memory for addresses */
+       if (netdev_uc_count(netdev) > rar_entries)
+               return -ENOMEM;
+
+       if (!netdev_uc_empty(netdev) && rar_entries) {
+               struct netdev_hw_addr *ha;
+
+               /*
+                * write the addresses in reverse order to avoid write
+                * combining
+                */
+               netdev_for_each_uc_addr(ha, netdev) {
+                       if (!rar_entries)
+                               break;
+                       e1000e_rar_set(hw, ha->addr, rar_entries--);
+                       count++;
+               }
+       }
+
+       /* zero out the remaining RAR entries not used above */
+       for (; rar_entries > 0; rar_entries--) {
+               ew32(RAH(rar_entries), 0);
+               ew32(RAL(rar_entries), 0);
+       }
+       e1e_flush();
+
+       return count;
 }
 
 /**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
  * promiscuous mode, and all-multi behavior.
  **/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct netdev_hw_addr *ha;
-       u8  *mta_list;
        u32 rctl;
 
        /* Check for Promiscuous and All Multicast modes */
-
        rctl = er32(RCTL);
 
+       /* clear the affected bits */
+       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
        if (netdev->flags & IFF_PROMISC) {
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-               rctl &= ~E1000_RCTL_VFE;
                /* Do not hardware filter VLANs in promisc mode */
                e1000e_vlan_filter_disable(adapter);
        } else {
+               int count;
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= E1000_RCTL_MPE;
-                       rctl &= ~E1000_RCTL_UPE;
                } else {
-                       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+                       /*
+                        * Write addresses to the MTA, if the attempt fails
+                        * then we should just turn on promiscuous mode so
+                        * that we can at least receive multicast traffic
+                        */
+                       count = e1000e_write_mc_addr_list(netdev);
+                       if (count < 0)
+                               rctl |= E1000_RCTL_MPE;
                }
                e1000e_vlan_filter_enable(adapter);
-       }
-
-       ew32(RCTL, rctl);
-
-       if (!netdev_mc_empty(netdev)) {
-               int i = 0;
-
-               mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
-               if (!mta_list)
-                       return;
-
-               /* prepare a packed array of only addresses. */
-               netdev_for_each_mc_addr(ha, netdev)
-                       memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
-               e1000_update_mc_addr_list(hw, mta_list, i);
-               kfree(mta_list);
-       } else {
                /*
-                * if we're called from probe, we might not have
-                * anything to do here, so clear out the list
+                * Write addresses to available RAR registers, if there is not
+                * sufficient space to store all the addresses then enable
+                * unicast promiscuous mode
                 */
-               e1000_update_mc_addr_list(hw, NULL, 0);
+               count = e1000e_write_uc_addr_list(netdev);
+               if (count < 0)
+                       rctl |= E1000_RCTL_UPE;
        }
 
+       ew32(RCTL, rctl);
+
        if (netdev->features & NETIF_F_HW_VLAN_RX)
                e1000e_vlan_strip_enable(adapter);
        else
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
  **/
 static void e1000_configure(struct e1000_adapter *adapter)
 {
-       e1000_set_multi(adapter->netdev);
+       e1000e_set_rx_mode(adapter->netdev);
 
        e1000_restore_vlan(adapter);
        e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
 
        clear_bit(__E1000_DOWN, &adapter->state);
 
-       napi_enable(&adapter->napi);
        if (adapter->msix_entries)
                e1000_configure_msix(adapter);
        e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1e_flush();
        usleep_range(10000, 20000);
 
-       napi_disable(&adapter->napi);
        e1000_irq_disable(adapter);
 
        del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
 
        e1000_irq_enable(adapter);
 
+       adapter->tx_hang_recheck = false;
        netif_start_queue(netdev);
 
        adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
+       napi_disable(&adapter->napi);
+
        if (!test_bit(__E1000_DOWN, &adapter->state)) {
                e1000e_down(adapter);
                e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
        u32 ctrl = er32(CTRL);
 
        /* Link status message must follow this format for user tools */
-       printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
-              "Flow Control: %s\n",
-              adapter->netdev->name,
-              adapter->link_speed,
-              (adapter->link_duplex == FULL_DUPLEX) ?
-              "Full Duplex" : "Half Duplex",
-              ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-              "Rx/Tx" :
-              ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
-               ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+       printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+               adapter->netdev->name,
+               adapter->link_speed,
+               adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+               (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+               (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+               (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       bool link_active = 0;
+       bool link_active = false;
        s32 ret_val = 0;
 
        /*
@@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
                        ret_val = hw->mac.ops.check_for_link(hw);
                        link_active = !hw->mac.get_link_status;
                } else {
-                       link_active = 1;
+                       link_active = true;
                }
                break;
        case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 
        if (link) {
                if (!netif_carrier_ok(netdev)) {
-                       bool txb2b = 1;
+                       bool txb2b = true;
 
                        /* Cancel scheduled suspend requests. */
                        pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work)
                                e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
 
                                if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
-                                       e_info("Autonegotiated half duplex but"
-                                              " link partner cannot autoneg. "
-                                              " Try forcing full duplex if "
-                                              "link gets many collisions.\n");
+                                       e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
                        }
 
                        /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
-                               txb2b = 0;
+                               txb2b = false;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
-                               txb2b = 0;
+                               txb2b = false;
                                adapter->tx_timeout_factor = 10;
                                break;
                        }
@@ -4473,7 +4544,7 @@ link_up:
        e1000e_flush_descriptors(adapter);
 
        /* Force detection of hung controller every watchdog period */
-       adapter->detect_tx_hung = 1;
+       adapter->detect_tx_hung = true;
 
        /*
         * With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        /* if count is 0 then mapping error has occurred */
        count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
        if (count) {
+               netdev_sent_queue(netdev, skb->len);
                e1000_tx_queue(adapter, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
                e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        if ((adapter->hw.mac.type == e1000_pch2lan) &&
            !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
            (new_mtu > ETH_DATA_LEN)) {
-               e_err("Jumbo Frames not supported on 82579 when CRC "
-                     "stripping is disabled.\n");
+               e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
                return -EINVAL;
        }
 
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 
        if (wufc) {
                e1000_setup_rctl(adapter);
-               e1000_set_multi(netdev);
+               e1000e_set_rx_mode(netdev);
 
                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
                                phy_data & E1000_WUS_MC ? "Multicast Packet" :
                                phy_data & E1000_WUS_BC ? "Broadcast Packet" :
                                phy_data & E1000_WUS_MAG ? "Magic Packet" :
-                               phy_data & E1000_WUS_LNKC ? "Link Status "
-                               " Change" : "other");
+                               phy_data & E1000_WUS_LNKC ?
+                               "Link Status Change" : "other");
                }
                e1e_wphy(&adapter->hw, BM_WUS, ~0);
        } else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
        }
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
                adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
        .ndo_get_stats64        = e1000e_get_stats64,
-       .ndo_set_rx_mode        = e1000_set_multi,
+       .ndo_set_rx_mode        = e1000e_set_rx_mode,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_change_mtu         = e1000_change_mtu,
        .ndo_do_ioctl           = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
                        if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
+                               dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                                goto err_dma;
                        }
                }
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                                  NETIF_F_TSO6 |
                                  NETIF_F_HW_CSUM);
 
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+
        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* Initialize link parameters. User can change them with ethtool */
        adapter->hw.mac.autoneg = 1;
-       adapter->fc_autoneg = 1;
+       adapter->fc_autoneg = true;
        adapter->hw.fc.requested_mode = e1000_fc_default;
        adapter->hw.fc.current_mode = e1000_fc_default;
        adapter->hw.phy.autoneg_advertised = 0x2f;
index 7881fb95a25ba51d6986621e125e0cadc4f09768..b8e20f037d0a8314789f236a31b4f723ef549b72 100644 (file)
@@ -29,6 +29,8 @@
  * e1000_82576
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
         * Check for invalid size
         */
        if ((hw->mac.type == e1000_82576) && (size > 15)) {
-               printk("igb: The NVM size is not valid, "
-                       "defaulting to 32K.\n");
+               pr_notice("The NVM size is not valid, defaulting to 32K\n");
                size = 15;
        }
        nvm->word_size = 1 << size;
index 43873eba2f63eaa157980b078e14c6d56f6d2885..f1206be4e71d85be56e5157efa43a59cbf63f18e 100644 (file)
@@ -148,7 +148,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                   SUPPORTED_1000baseT_Full|
                                   SUPPORTED_Autoneg |
                                   SUPPORTED_TP);
-               ecmd->advertising = ADVERTISED_TP;
+               ecmd->advertising = (ADVERTISED_TP |
+                                    ADVERTISED_Pause);
 
                if (hw->mac.autoneg == 1) {
                        ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +166,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                ecmd->advertising = (ADVERTISED_1000baseT_Full |
                                     ADVERTISED_FIBRE |
-                                    ADVERTISED_Autoneg);
+                                    ADVERTISED_Autoneg |
+                                    ADVERTISED_Pause);
 
                ecmd->port = PORT_FIBRE;
        }
@@ -673,25 +675,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *drvinfo)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
        u16 eeprom_data;
 
-       strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, igb_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
 
        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers */
        adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
-       sprintf(firmware_version, "%d.%d-%d",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d-%d",
                (eeprom_data & 0xF000) >> 12,
                (eeprom_data & 0x0FF0) >> 4,
                eeprom_data & 0x000F);
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
index ced544499f1b0a8a3e7625dd88719bbc5592563f..89d576ce57763fac0cc69e1dacefefabf5d8508f 100644 (file)
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -145,9 +147,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
@@ -325,16 +327,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
                        regs[n] = rd32(E1000_TXDCTL(n));
                break;
        default:
-               printk(KERN_INFO "%-15s %08x\n",
-                       reginfo->name, rd32(reginfo->ofs));
+               pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
                return;
        }
 
        snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
-       printk(KERN_INFO "%-15s ", rname);
-       for (n = 0; n < 4; n++)
-               printk(KERN_CONT "%08x ", regs[n]);
-       printk(KERN_CONT "\n");
+       pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+               regs[2], regs[3]);
 }
 
 /*
@@ -359,18 +358,15 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               printk(KERN_INFO "Device Name     state            "
-                       "trans_start      last_rx\n");
-               printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-               netdev->name,
-               netdev->state,
-               netdev->trans_start,
-               netdev->last_rx);
+               pr_info("Device Name     state            trans_start      "
+                       "last_rx\n");
+               pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+                       netdev->state, netdev->trans_start, netdev->last_rx);
        }
 
        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
-       printk(KERN_INFO " Register Name   Value\n");
+       pr_info(" Register Name   Value\n");
        for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
             reginfo->name; reginfo++) {
                igb_regdump(hw, reginfo);
@@ -381,18 +377,17 @@ static void igb_dump(struct igb_adapter *adapter)
                goto exit;
 
        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-               " leng ntw timestamp\n");
+       pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                struct igb_tx_buffer *buffer_info;
                tx_ring = adapter->tx_ring[n];
                buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-               printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
-                          n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                          (u64)buffer_info->dma,
-                          buffer_info->length,
-                          buffer_info->next_to_watch,
-                          (u64)buffer_info->time_stamp);
+               pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+                       n, tx_ring->next_to_use, tx_ring->next_to_clean,
+                       (u64)buffer_info->dma,
+                       buffer_info->length,
+                       buffer_info->next_to_watch,
+                       (u64)buffer_info->time_stamp);
        }
 
        /* Print TX Rings */
@@ -414,36 +409,38 @@ static void igb_dump(struct igb_adapter *adapter)
 
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "T [desc]     [address 63:0  ] "
-                       "[PlPOCIStDDM Ln] [bi->dma       ] "
-                       "leng  ntw timestamp        bi->skb\n");
+               pr_info("------------------------------------\n");
+               pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+               pr_info("------------------------------------\n");
+               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
+                       "[bi->dma       ] leng  ntw timestamp        "
+                       "bi->skb\n");
 
                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+                       const char *next_desc;
                        struct igb_tx_buffer *buffer_info;
                        tx_desc = IGB_TX_DESC(tx_ring, i);
                        buffer_info = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
-                       printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %p %016llX %p", i,
+                       if (i == tx_ring->next_to_use &&
+                           i == tx_ring->next_to_clean)
+                               next_desc = " NTC/U";
+                       else if (i == tx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == tx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
+                       pr_info("T [0x%03X]    %016llX %016llX %016llX"
+                               " %04X  %p %016llX %p%s\n", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)buffer_info->dma,
                                buffer_info->length,
                                buffer_info->next_to_watch,
                                (u64)buffer_info->time_stamp,
-                               buffer_info->skb);
-                       if (i == tx_ring->next_to_use &&
-                               i == tx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC/U\n");
-                       else if (i == tx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == tx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
+                               buffer_info->skb, next_desc);
 
                        if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
                                print_hex_dump(KERN_INFO, "",
@@ -456,11 +453,11 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print RX Rings Summary */
 rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC]\n");
+       pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
-               printk(KERN_INFO " %5d %5X %5X\n", n,
-                          rx_ring->next_to_use, rx_ring->next_to_clean);
+               pr_info(" %5d %5X %5X\n",
+                       n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }
 
        /* Print RX Rings */
@@ -492,36 +489,43 @@ rx_ring_summary:
 
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
-                       "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
-                       "<-- Adv Rx Read format\n");
-               printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
-                       "[vl er S cks ln] ---------------- [bi->skb] "
-                       "<-- Adv Rx Write-Back format\n");
+               pr_info("------------------------------------\n");
+               pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+               pr_info("------------------------------------\n");
+               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
+                       "[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+                       "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
                        struct igb_rx_buffer *buffer_info;
                        buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IGB_RX_DESC(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                       "%016llX ---------------- %p", i,
+                               pr_info("%s[0x%03X]     %016llX %016llX -------"
+                                       "--------- %p%s\n", "RWB", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
-                                       buffer_info->skb);
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                       "%016llX %016llX %p", i,
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX"
+                                       " %p%s\n", "R  ", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)buffer_info->dma,
-                                       buffer_info->skb);
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter)) {
                                        print_hex_dump(KERN_INFO, "",
@@ -538,14 +542,6 @@ rx_ring_summary:
                                          PAGE_SIZE/2, true);
                                }
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
-
                }
        }
 
@@ -599,10 +595,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s - version %s\n",
+       pr_info("%s - version %s\n",
               igb_driver_string, igb_driver_version);
 
-       printk(KERN_INFO "%s\n", igb_copyright);
+       pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
@@ -1742,7 +1738,8 @@ void igb_reset(struct igb_adapter *adapter)
        igb_get_phy_info(hw);
 }
 
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1753,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                igb_vlan_mode(netdev, features);
@@ -3640,23 +3638,23 @@ static void igb_watchdog_task(struct work_struct *work)
 
                        ctrl = rd32(E1000_CTRL);
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
-                                "Flow Control: %s\n",
+                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+                              "Duplex, Flow Control: %s\n",
                               netdev->name,
                               adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
-                                "Full Duplex" : "Half Duplex",
-                              ((ctrl & E1000_CTRL_TFCE) &&
-                               (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
-                              ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
-                              ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
+                              "Full" : "Half",
+                              (ctrl & E1000_CTRL_TFCE) &&
+                              (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+                              (ctrl & E1000_CTRL_RFCE) ?  "RX" :
+                              (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
 
                        /* check for thermal sensor event */
-                       if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
-                               printk(KERN_INFO "igb: %s The network adapter "
-                                                "link speed was downshifted "
-                                                "because it overheated.\n",
-                                                netdev->name);
+                       if (igb_thermal_sensor_event(hw,
+                           E1000_THSTAT_LINK_THROTTLE)) {
+                               netdev_info(netdev, "The network adapter link "
+                                           "speed was downshifted because it "
+                                           "overheated\n");
                        }
 
                        /* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3684,10 @@ static void igb_watchdog_task(struct work_struct *work)
                        adapter->link_duplex = 0;
 
                        /* check for thermal sensor event */
-                       if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
-                               printk(KERN_ERR "igb: %s The network adapter "
-                                               "was stopped because it "
-                                               "overheated.\n",
-                                               netdev->name);
+                       if (igb_thermal_sensor_event(hw,
+                           E1000_THSTAT_PWR_DOWN)) {
+                               netdev_err(netdev, "The network adapter was "
+                                          "stopped because it overheated\n");
                        }
 
                        /* Links status message must follow this format */
@@ -6138,7 +6135,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                return true;
 
        if (!page) {
-               page = netdev_alloc_page(rx_ring->netdev);
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                bi->page = page;
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6464,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
        return 0;
 }
 
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6491,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
        igb_rlpml_set(adapter);
 }
 
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6504,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        igb_vfta_set(hw, vid, true);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6523,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                igb_vfta_set(hw, vid, false);
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -7064,15 +7065,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                        wr32(E1000_DMCTXTH, 0);
 
                        /*
-                        * DMA Coalescing high water mark needs to be higher
-                        * than the RX threshold. set hwm to PBA -  2 * max
-                        * frame size
+                        * DMA Coalescing high water mark needs to be greater
+                        * than the Rx threshold. Set hwm to PBA - max frame
+                        * size in 16B units, capping it at PBA - 6KB.
                         */
-                       hwm = pba - (2 * adapter->max_frame_size);
+                       hwm = 64 * pba - adapter->max_frame_size / 16;
+                       if (hwm < 64 * (pba - 6))
+                               hwm = 64 * (pba - 6);
+                       reg = rd32(E1000_FCRTC);
+                       reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+                       reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+                               & E1000_FCRTC_RTH_COAL_MASK);
+                       wr32(E1000_FCRTC, reg);
+
+                       /*
+                        * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+                        * frame size, capping it at PBA - 10KB.
+                        */
+                       dmac_thr = pba - adapter->max_frame_size / 512;
+                       if (dmac_thr < pba - 10)
+                               dmac_thr = pba - 10;
                        reg = rd32(E1000_DMACR);
                        reg &= ~E1000_DMACR_DMACTHR_MASK;
-                       dmac_thr = pba - 4;
-
                        reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
                                & E1000_DMACR_DMACTHR_MASK);
 
@@ -7088,7 +7102,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                         * coalescing(smart fifb)-UTRESH=0
                         */
                        wr32(E1000_DMCRTRH, 0);
-                       wr32(E1000_FCRTC, hwm);
 
                        reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
 
index 2c25858cc0ff8f26b8000333caf832662edf5fff..7b600a1f6366f811d31ae38fd12cab1c6d452505 100644 (file)
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32] = "N/A";
 
-       strncpy(drvinfo->driver,  igbvf_driver_name, 32);
-       strncpy(drvinfo->version, igbvf_driver_version, 32);
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  igbvf_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, igbvf_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = igbvf_get_regs_len(netdev);
        drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
 }
index cca78124be316b12855a5367425cfd4251df3d55..fd3da3076c2f3bc6b10032defa1a838d0b5c59ef 100644 (file)
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
        e1000_rlpml_set_vf(hw, max_frame_size);
 }
 
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if (hw->mac.ops.set_vfta(hw, vid, true))
+       if (hw->mac.ops.set_vfta(hw, vid, true)) {
                dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
-       else
-               set_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       set_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                igbvf_irq_enable(adapter);
 
-       if (hw->mac.ops.set_vfta(hw, vid, false))
+       if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
                        "Failed to remove vlan id %d\n", vid);
-       else
-               clear_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       clear_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
 
 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
 {
-       dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
-                adapter->link_speed,
-                ((adapter->link_duplex == FULL_DUPLEX) ?
-                 "Full Duplex" : "Half Duplex"));
+       dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+                adapter->link_speed,
+                adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
 }
 
 static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
        dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
 }
 
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = {
 static int __init igbvf_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s - version %s\n",
-              igbvf_driver_string, igbvf_driver_version);
-       printk(KERN_INFO "%s\n", igbvf_copyright);
+       pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+       pr_info("%s\n", igbvf_copyright);
 
        ret = pci_register_driver(&igbvf_driver);
 
index 9dfce7dff79b8e54c8c710e33f9b7ce1f503b7ea..dbb7dd2f8e360e4d6c1013182e9d667a19d7fa56 100644 (file)
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  ixgb_driver_name, 32);
-       strncpy(drvinfo->version, ixgb_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  ixgb_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgb_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IXGB_STATS_LEN;
        drvinfo->regdump_len = ixgb_get_regs_len(netdev);
        drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
index e21148f8b1607d02b9054435e3e013c9f6a75374..9bd5faf64a85c36a9ad7438cd98588354a88b556 100644 (file)
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
 
 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
-                       adapter->have_msi = 1;
+                       adapter->have_msi = true;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
        }
 }
 
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        /*
         * Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
 }
 
 static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
                return 0;
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
 }
 
-static void
+static int
 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void
+static int
 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void
index 4ae26a748da0f325514a52ccbd2804d7221a1c2f..772072147bea6fac57d0a6d0a0056fd6d42a78d0 100644 (file)
@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_SFP_FCOE:
        case IXGBE_DEV_ID_82599_SFP_EM:
        case IXGBE_DEV_ID_82599_SFP_SF2:
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599EN_SFP:
                media_type = ixgbe_media_type_fiber;
                break;
index f1365fef4ed2b7b4e9e3050dce1715ac41c63edc..a3aa6333073f0727b67d5c4bb0ea4fe0ef31e6ee 100644 (file)
@@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        if (hw->mac.type == ixgbe_mac_X540) {
                if (hw->phy.id == 0)
                        hw->phy.ops.identify(hw);
-               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
-               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
-               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
-               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+               hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
        }
 
        return 0;
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
 {
        ixgbe_link_speed speed = 0;
-       bool link_up = 0;
+       bool link_up = false;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
index 33b93ffb87cb1e348a16115913b6d5d1ef0623a5..da31735311f137091e9447e189491e4b223a40ed 100644 (file)
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort a bad configuration */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 
        if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
             adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC;
+               adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort bad configurations */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
        *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               usleep_range(1000, 2000);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_stop(dev);
+
+       ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_init_interrupt_scheme(adapter);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_open(dev);
+
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (ret)
                return DCB_NO_HW_CHG;
 
-#ifdef IXGBE_FCOE
-       if (up && !(up & (1 << adapter->fcoe.up)))
-               adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
-       /*
-        * Only take down the adapter if an app change occurred. FCoE
-        * may shuffle tx rings in this case and this can not be done
-        * without a reset currently.
-        */
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-                       usleep_range(1000, 2000);
-
-               adapter->fcoe.up = ffs(up) - 1;
-
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_stop(netdev);
-               ixgbe_clear_interrupt_scheme(adapter);
-       }
-#endif
-
        if (adapter->dcb_cfg.pfc_mode_enable) {
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                }
        }
 
-#ifdef IXGBE_FCOE
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               ixgbe_init_interrupt_scheme(adapter);
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_open(netdev);
-               ret = DCB_HW_CHG_RST;
-       }
-#endif
-
        if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
                u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
                u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (adapter->dcb_cfg.pfc_mode_enable)
                adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
-               clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+       /* Reprogram FCoE hardware offloads when the traffic class
+        * FCoE is using changes. This happens if the APP info
+        * changes or the up2tc mapping is updated.
+        */
+       if ((up && !(up & (1 << adapter->fcoe.up))) ||
+           (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+               adapter->fcoe.up = ffs(up) - 1;
+               ixgbe_dcbnl_devreset(netdev);
+               ret = DCB_HW_CHG_RST;
+       }
+#endif
+
        adapter->dcb_set_bitmap = 0x00;
        return ret;
 }
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
        return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
 }
 
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_stop(dev);
-
-       ixgbe_clear_interrupt_scheme(adapter);
-       ixgbe_init_interrupt_scheme(adapter);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                                   struct dcb_app *app)
 {
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                ixgbe_dcbnl_ieee_setets(dev, &ets);
                ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
        } else if (mode & DCB_CAP_DCBX_VER_CEE) {
-               adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+               u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+               adapter->dcb_set_bitmap |= mask;
                ixgbe_dcbnl_set_all(dev);
        } else {
                /* Drop into single TC mode strict priority as this
index 70d58c3849b0592fa767319a0f14c5551330f15a..da7e580f517ae03528fed27eb2af79ea3df81aaa 100644 (file)
@@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
        u32 nvm_track_id;
 
-       strncpy(drvinfo->driver, ixgbe_driver_name,
-               sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, ixgbe_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgbe_driver_version,
+               sizeof(drvinfo->version));
 
        nvm_track_id = (adapter->eeprom_verh << 16) |
                        adapter->eeprom_verl;
-       snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
                 nvm_track_id);
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IXGBE_STATS_LEN;
        drvinfo->testinfo_len = IXGBE_TEST_LEN;
        drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1959,12 +1955,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
        /* WOL not supported except for the following */
        switch(hw->device_id) {
        case IXGBE_DEV_ID_82599_SFP:
-               /* Only this subdevice supports WOL */
-               if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+               /* Only these subdevices support WOL */
+               switch (hw->subsystem_device_id) {
+               case IXGBE_SUBDEV_ID_82599_560FLR:
+                       /* only support first port */
+                       if (hw->bus.func != 0) {
+                               wol->supported = 0;
+                               break;
+                       }
+               case IXGBE_SUBDEV_ID_82599_SFP:
+                       retval = 0;
+                       break;
+               default:
                        wol->supported = 0;
                        break;
                }
-               retval = 0;
                break;
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
                /* All except this subdevice support WOL */
index 8ef92d1a6aa126037c3ce9d1049a7fde3f766790..74669a8c060e0a697fd3302133500daef162b751 100644 (file)
@@ -106,6 +106,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        /* required last entry */
        {0, }
 };
@@ -146,7 +147,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 {
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
-       /* flush memory to make sure state is correct before next watchog */
+       /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_clear_bit();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
@@ -1140,7 +1141,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
                if (ring_is_ps_enabled(rx_ring)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(rx_ring->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        goto no_buffers;
@@ -2156,7 +2157,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
 
        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
-        * therefore no explict interrupt disable is necessary */
+        * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
                /*
@@ -3044,7 +3045,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        hw->mac.ops.enable_rx_dma(hw, rxctrl);
 }
 
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3054,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        /* add VID to filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3067,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        /* remove VID from filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -3602,7 +3607,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 {
        /*
-        * We are assuming the worst case scenerio here, and that
+        * We are assuming the worst case scenario here, and that
         * is that an SFP was inserted/removed after the reset
         * but before SFP detection was enabled.  As such the best
         * solution is to just start searching as soon as we start
@@ -3824,7 +3829,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        case IXGBE_ERR_EEPROM_VERSION:
                /* We are running on a pre-production device, log a warning */
                e_dev_warn("This device is a pre-production adapter/LOM. "
-                          "Please be aware there may be issuesassociated with "
+                          "Please be aware there may be issues associated with "
                           "your hardware.  If you are experiencing problems "
                           "please contact your Intel or hardware "
                           "representative who provided you with this "
@@ -4019,7 +4024,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
                /* Mark all the VFs as inactive */
                for (i = 0 ; i < adapter->num_vfs; i++)
-                       adapter->vfinfo[i].clear_to_send = 0;
+                       adapter->vfinfo[i].clear_to_send = false;
 
                /* ping all the active vfs to let them know we are going down */
                ixgbe_ping_all_vfs(adapter);
@@ -5788,9 +5793,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
  * @adapter - pointer to the device adapter structure
  *
  * This function serves two purposes.  First it strobes the interrupt lines
- * in order to make certain interrupts are occuring.  Secondly it sets the
+ * in order to make certain interrupts are occurring.  Secondly it sets the
  * bits needed to check for TX hangs.  As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
  */
 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 {
@@ -7128,7 +7133,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
 
        /* Hardware has to reinitialize queues and interrupts to
-        * match packet buffer alignment. Unfortunantly, the
+        * match packet buffer alignment. Unfortunately, the
         * hardware is not flexible enough to do this dynamically.
         */
        if (netif_running(dev))
@@ -7174,7 +7179,8 @@ void ixgbe_do_reset(struct net_device *netdev)
                ixgbe_reset(adapter);
 }
 
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+       netdev_features_t data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -7204,7 +7210,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
        return data;
 }
 
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+       netdev_features_t data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        bool need_reset = false;
@@ -7598,9 +7605,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        adapter->wol = 0;
        switch (pdev->device) {
        case IXGBE_DEV_ID_82599_SFP:
-               /* Only this subdevice supports WOL */
-               if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+               /* Only these subdevice supports WOL */
+               switch (pdev->subsystem_device) {
+               case IXGBE_SUBDEV_ID_82599_560FLR:
+                       /* only support first port */
+                       if (hw->bus.func != 0)
+                               break;
+               case IXGBE_SUBDEV_ID_82599_SFP:
                        adapter->wol = IXGBE_WUFC_MAG;
+                       break;
+               }
                break;
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
                /* All except this subdevice support WOL */
index 9a56fd74e6738f5a16beb7b2c95c75ccc619e366..7cf1e1f56c69664d9397c46f554dc38701bdb81e 100644 (file)
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        u32 max_retry = 10;
        u32 retry = 0;
        u16 swfw_mask = 0;
-       bool nack = 1;
+       bool nack = true;
        *data = 0;
 
        if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
 {
        s32 i;
-       bool bit = 0;
+       bool bit = false;
 
        for (i = 7; i >= 0; i--) {
                ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
        s32 status = 0;
        s32 i;
        u32 i2cctl;
-       bool bit = 0;
+       bool bit = false;
 
        for (i = 7; i >= 0; i--) {
                bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
        i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        i2cctl |= IXGBE_I2C_DATA_OUT;
        IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+       IXGBE_WRITE_FLUSH(hw);
 
        return status;
 }
@@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
        u32 i = 0;
        u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        u32 timeout = 10;
-       bool ack = 1;
+       bool ack = true;
 
        ixgbe_raise_i2c_clk(hw, &i2cctl);
 
@@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
        bool data;
 
        if (*i2cctl & IXGBE_I2C_DATA_IN)
-               data = 1;
+               data = true;
        else
-               data = 0;
+               data = false;
 
        return data;
 }
index 00fcd39ad666b6c289379e401800438347952dd7..cf6812dd1436116ab23beb0cf57b1ab7732c8b38 100644 (file)
@@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
                /* reply to reset with ack and vf mac address */
                msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-               memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+               memcpy(new_mac, vf_mac, ETH_ALEN);
                /*
                 * Piggyback the multicast filter type so VF can compute the
                 * correct vectors
index df04f1a3857c96e5e16b4098580d2904c5c33a2e..e8badab033590179ff5056c2c0a8f61cce69b9c4 100644 (file)
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
                           u8 qos);
index 6c5cca808bd7e492894e0225bf800861b4050793..802bfa0f62cc022c34965c32343719749e1c4ca7 100644 (file)
@@ -57,6 +57,7 @@
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a
 #define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
@@ -65,6 +66,7 @@
 #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C
 #define IXGBE_DEV_ID_82599_LS            0x154F
 #define IXGBE_DEV_ID_X540T               0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP     0x154A
 
 /* VF Device IDs */
 #define IXGBE_DEV_ID_82599_VF           0x10ED
@@ -1710,8 +1712,6 @@ enum {
 #define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
 #define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
 
-#define IXGBE_ETH_LENGTH_OF_ADDRESS   6
-
 #define IXGBE_EEPROM_PAGE_SIZE_MAX       128
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info {
 struct ixgbe_mac_info {
        struct ixgbe_mac_operations     ops;
        enum ixgbe_mac_type             type;
-       u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-       u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-       u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8                              addr[ETH_ALEN];
+       u8                              perm_addr[ETH_ALEN];
+       u8                              san_addr[ETH_ALEN];
        /* prefix for World Wide Node Name (WWNN) */
        u16                             wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
index e5101e91b6b55dbc59b99cf710c3528f36035e34..8cc5eccfd65194a0a738b969f0f9b97529842d9f 100644 (file)
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
+       ixgbe_link_speed speed;
+       bool link_up;
 
        /*
-        * In order for the blink bit in the LED control register
-        * to work, link and speed must be forced in the MAC. We
-        * will reverse this when we stop the blinking.
+        * Link should be up in order for the blink bit in the LED control
+        * register to work. Force link and speed in the MAC if link is down.
+        * This will be reversed when we stop the blinking.
         */
-       macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
-       macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
-       IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+       hw->mac.ops.check_link(hw, &speed, &link_up, false);
+       if (link_up == false) {
+               macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+               macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+               IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+       }
        /* Set the LED to LINK_UP + BLINK. */
        ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
index 78abb6f1a866d1190b7894b12c411e20c1128005..2eb89cb94a0d4c45a14831cda8e875878a4679e0 100644 (file)
@@ -35,7 +35,6 @@
 #define IXGBE_VF_IRQ_CLEAR_MASK         7
 #define IXGBE_VF_MAX_TX_QUEUES          1
 #define IXGBE_VF_MAX_RX_QUEUES          1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS     6
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
index e29ba4506b74d4a3605e74bcb10f9262fb70c452..dc8e6511c64068debdb4cec9887b321e348a9ea9 100644 (file)
@@ -27,6 +27,8 @@
 
 /* ethtool support for ixgbevf */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
-       strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
-       strlcpy(drvinfo->fw_version, "N/A", 4);
-       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgbevf_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
 }
 
 static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = {
        writel((W & M), (adapter->hw.hw_addr + R));                           \
        val = readl(adapter->hw.hw_addr + R);                                 \
        if ((W & M) != (val & M)) {                                           \
-               printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
-                                "expected 0x%08X\n", R, (val & M), (W & M)); \
+               pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+                      "0x%08X\n", R, (val & M), (W & M));                    \
                *data = R;                                                    \
                writel(before, (adapter->hw.hw_addr + R));                    \
                return 1;                                                     \
index 4c8e19951d57e58d53dedd3249c72d976413d020..891162d1610ca6e21eaf88ab41cd07c918292327 100644 (file)
@@ -29,6 +29,9 @@
 /******************************************************************************
  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
 ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(adapter->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
@@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        }
 }
 
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
        int count = 0;
 
        if ((netdev_uc_count(netdev)) > 10) {
-               printk(KERN_ERR "Too many unicast filters - No Space\n");
+               pr_err("Too many unicast filters - No Space\n");
                return -ENOSPC;
        }
 
@@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 
        err = ixgbevf_alloc_queues(adapter);
        if (err) {
-               printk(KERN_ERR "Unable to allocate memory for queues\n");
+               pr_err("Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }
 
@@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        } else {
                err = hw->mac.ops.init_hw(hw);
                if (err) {
-                       printk(KERN_ERR "init_shared_code failed: %d\n", err);
+                       pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
        }
@@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev)
                 * the vf can't start. */
                if (hw->adapter_stopped) {
                        err = IXGBE_ERR_MBX;
-                       printk(KERN_ERR "Unable to start - perhaps the PF"
-                              " Driver isn't up yet\n");
+                       pr_err("Unable to start - perhaps the PF Driver isn't "
+                              "up yet\n");
                        goto err_setup_reset;
                }
        }
@@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
                                break;
                        default:
                                if (unlikely(net_ratelimit())) {
-                                       printk(KERN_WARNING
-                                              "partial checksum but "
-                                              "proto=%x!\n",
-                                              skb->protocol);
+                                       pr_warn("partial checksum but "
+                                               "proto=%x!\n", skb->protocol);
                                }
                                break;
                        }
@@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->dev_addr)) {
-               printk(KERN_ERR "invalid MAC address\n");
+               pr_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
        }
@@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = {
 static int __init ixgbevf_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
-              ixgbevf_driver_version);
+       pr_info("%s - version %s\n", ixgbevf_driver_string,
+               ixgbevf_driver_version);
 
-       printk(KERN_INFO "%s\n", ixgbevf_copyright);
+       pr_info("%s\n", ixgbevf_copyright);
 
        ret = pci_register_driver(&ixgbevf_driver);
        return ret;
index ea393eb03f3aeba86c4658b0451a54b447e26034..9d38a94a348aed51fa6abdeae320155eb8fcde7a 100644 (file)
@@ -47,8 +47,8 @@
 #define IXGBE_VFMAILBOX_RSTD     0x00000080 /* PF has indicated reset done */
 #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
 
-#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * (vfn)))
 
 #define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
 #define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
index 189200eeca26616220ae3fab40c2000bedd445b3..5e4d5e5cdf38dc794f791fdc95b9e2ade58566db 100644 (file)
 #define IXGBE_VTEIMC           0x0010C
 #define IXGBE_VTEIAC           0x00110
 #define IXGBE_VTEIAM           0x00114
-#define IXGBE_VTEITR(x)        (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x)        (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x)        (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)        (0x00120 + (4 * (x)))
 #define IXGBE_VTIVAR_MISC      0x00140
-#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
 #define IXGBE_VFPSRTYPE        0x00300
-#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
 #define IXGBE_VFGPRC           0x0101C
 #define IXGBE_VFGPTC           0x0201C
 #define IXGBE_VFGORC_LSB       0x01020
index aa3682e8c473cc84bcf9c6bc0b7d17a12857854d..21533e300367f78dd4058ee2df7ba31c7df1122c 100644 (file)
@@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
                return IXGBE_ERR_INVALID_MAC_ADDR;
 
-       memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+       memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 
        return 0;
@@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
  **/
 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 {
-       memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+       memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
 
        return 0;
 }
index 7becff1f387d7c9d84fb26ab3db6f7299bb64f15..27d651a80f3f32f6458645ad2fc6a778537a859c 100644 (file)
@@ -1744,6 +1744,112 @@ jme_phy_off(struct jme_adapter *jme)
                jme_new_phy_off(jme);
 }
 
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+       return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+                       JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+                       phy_data);
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+       u32 ctrl1000, phy_data;
+
+       jme_phy_off(jme);
+       jme_phy_on(jme);
+       /*  Enabel PHY test mode 1 */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       ctrl1000 |= PHY_GAD_TEST_MODE_1;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+       phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+                       JM_PHY_EXT_COMM_2_CALI_ENABLE;
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+       msleep(20);
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+                       JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+                       JM_PHY_EXT_COMM_2_CALI_LATCH);
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+       /*  Disable PHY test mode */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+       return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+       u32 phy_comm0 = 0, phy_comm1 = 0;
+       u8 nic_ctrl;
+
+       pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+       if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+               return 0;
+
+       switch (jme->pdev->device) {
+       case PCI_DEVICE_ID_JMICRON_JMC250:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               break;
+       case PCI_DEVICE_ID_JMICRON_JMC260:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+                       phy_comm0 = 0x608A;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+                       phy_comm0 = 0x408A;
+               break;
+       default:
+               return -ENODEV;
+       }
+       if (phy_comm0)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+       if (phy_comm1)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+       return 0;
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_reset_link(jme);
 
        return 0;
@@ -1883,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
                struct page *page,
                u32 page_offset,
                u32 len,
-               u8 hidma)
+               bool hidma)
 {
        dma_addr_t dmaaddr;
 
@@ -1917,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
-       u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+       bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
@@ -2292,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev,
 {
        struct jme_adapter *jme = netdev_priv(netdev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(jme->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
 }
 
 static int
@@ -2620,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
        jme->msg_enable = value;
 }
 
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        if (netdev->mtu > 1900)
                features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2629,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
 }
 
 static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
 
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_start_irq(jme);
        netif_device_attach(netdev);
 
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
index 02ea27c1dcb5a464f06d9b38d98259f6f080fd4c..4304072bd3c536e852a38cf6c37716df8500ed66 100644 (file)
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
                                  RXMCS_CHECKSUM,
 };
 
+/*     Extern PHY common register 2    */
+
+#define PHY_GAD_TEST_MODE_1                    0x00002000
+#define PHY_GAD_TEST_MODE_MSK                  0x0000E000
+#define JM_PHY_SPEC_REG_READ                   0x00004000
+#define JM_PHY_SPEC_REG_WRITE                  0x00008000
+#define PHY_CALIBRATION_DELAY                  20
+#define JM_PHY_SPEC_ADDR_REG                   0x1E
+#define JM_PHY_SPEC_DATA_REG                   0x1F
+
+#define JM_PHY_EXT_COMM_0_REG                  0x30
+#define JM_PHY_EXT_COMM_1_REG                  0x31
+#define JM_PHY_EXT_COMM_2_REG                  0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE          0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0          0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH           0x10
+#define PCI_PRIV_SHARE_NICCTRL                 0xF5
+#define JME_FLAG_PHYEA_ENABLE                  0x2
+
 /*
  * Wakeup Frame setup interface registers
  */
index d8430f487b84f8012ec8390e522ae19eeba9949e..6ad094f176f8b175d6a3c63f1d61b6fbd3e06ec6 100644 (file)
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
        .remove = korina_remove,
 };
 
-static int __init korina_init_module(void)
-{
-       return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
-       return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
 
 MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
 MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
index 6bb2b9506cadfde73ddef809bc0fcf78dd202b0a..0b3567ab812151f00210544cc805815b0953a90d 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #include <asm/checksum.h>
 
index 194a03113802f80b67139e9c6fcfec6b3f523164..e87847e32ddb7046a926f1dde375a88d4d62232d 100644 (file)
@@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static void mv643xx_eth_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
 {
-       strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
-       strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, "platform", 32);
+       strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
        drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
@@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
 
 
 static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
-       u32 rx_csum = features & NETIF_F_RXCSUM;
+       bool rx_csum = features & NETIF_F_RXCSUM;
 
        wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
 
index d17d0624c5e63d13689673ba16295c51aefb1297..5ec409e3da090d6657afb2bf45e5ffa6d4e339e4 100644 (file)
@@ -1645,18 +1645,7 @@ static struct platform_driver pxa168_eth_driver = {
                   },
 };
 
-static int __init pxa168_init_module(void)
-{
-       return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
-       platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
index c7b60839ac9951caa4b3de0f0577bfd7d39bb1ca..18a87a57fc0aa4ca9ff3bb406a9633fc281bc308 100644 (file)
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
 {
        struct skge_port *skge = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(skge->hw->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+               sizeof(info->bus_info));
 }
 
 static const struct skge_stat {
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
        spin_unlock_irq(&hw->hw_lock);
 
        napi_enable(&skge->napi);
+
+       skge_set_multicast(dev);
+
        return 0;
 
  free_tx_ring:
@@ -4039,7 +4042,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int skge_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -4101,7 +4104,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
 #else
 
 #define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
 static void skge_shutdown(struct pci_dev *pdev)
 {
index fdc6c394c683ed64e9ecd456427e742d0f101bcd..760c2b17dfd3675037133a996c9d98aa1e5fb5f4 100644 (file)
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME               "sky2"
-#define DRV_VERSION            "1.29"
+#define DRV_VERSION            "1.30"
 
 /*
  * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -68,7 +68,7 @@
 #define MAX_SKB_TX_LE  (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
 #define TX_MIN_PENDING         (MAX_SKB_TX_LE+1)
 #define TX_MAX_PENDING         1024
-#define TX_DEF_PENDING         127
+#define TX_DEF_PENDING         63
 
 #define TX_WATCHDOG            (5 * HZ)
 #define NAPI_WEIGHT            64
@@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
 
        /* block receiver */
        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+       sky2_read32(hw, B0_CTST);
 }
 
 static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -1109,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
        sky2->tx_prod = sky2->tx_cons = 0;
        sky2->tx_tcpsum = 0;
        sky2->tx_last_mss = 0;
+       netdev_reset_queue(sky2->netdev);
 
        le = get_tx_le(sky2, &sky2->tx_prod);
        le->addr = 0;
@@ -1274,8 +1276,16 @@ static void rx_set_checksum(struct sky2_port *sky2)
                     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 }
 
+/*
+ * Fixed initial key as seed to RSS.
+ */
+static const uint32_t rss_init_key[10] = {
+       0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
+       0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
+};
+
 /* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -1289,12 +1299,9 @@ static void rx_set_rss(struct net_device *dev, u32 features)
 
        /* Program RSS initial values */
        if (features & NETIF_F_RXHASH) {
-               u32 key[nkeys];
-
-               get_random_bytes(key, nkeys * sizeof(u32));
                for (i = 0; i < nkeys; i++)
                        sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
-                                    key[i]);
+                                    rss_init_key[i]);
 
                /* Need to turn on (undocumented) flag to make hashing work  */
                sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1396,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
 
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -1717,6 +1724,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
        if (err)
                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
        else {
+               hw->flags |= SKY2_HW_IRQ_SETUP;
+
                napi_enable(&hw->napi);
                sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
                sky2_read32(hw, B0_IMSK);
@@ -1727,7 +1736,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
 
 
 /* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static int sky2_open(struct net_device *dev)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -1747,6 +1756,11 @@ static int sky2_up(struct net_device *dev)
 
        sky2_hw_up(sky2);
 
+       if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+           hw->chip_id == CHIP_ID_YUKON_PRM ||
+           hw->chip_id == CHIP_ID_YUKON_OP_2)
+               imask |= Y2_IS_PHY_QLNK;        /* enable PHY Quick Link */
+
        /* Enable interrupts from phy/mac for port */
        imask = sky2_read32(hw, B0_IMSK);
        imask |= portirq_msk[port];
@@ -1958,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
        if (tx_avail(sky2) <= MAX_SKB_TX_LE)
                netif_stop_queue(dev);
 
+       netdev_sent_queue(dev, skb->len);
        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
        return NETDEV_TX_OK;
@@ -1989,7 +2004,8 @@ mapping_error:
 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 {
        struct net_device *dev = sky2->netdev;
-       unsigned idx;
+       u16 idx;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        BUG_ON(done >= sky2->tx_ring_size);
 
@@ -2004,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                        netif_printk(sky2, tx_done, KERN_DEBUG, dev,
                                     "tx done %u\n", idx);
 
-                       u64_stats_update_begin(&sky2->tx_stats.syncp);
-                       ++sky2->tx_stats.packets;
-                       sky2->tx_stats.bytes += skb->len;
-                       u64_stats_update_end(&sky2->tx_stats.syncp);
+                       pkts_compl++;
+                       bytes_compl += skb->len;
 
                        re->skb = NULL;
                        dev_kfree_skb_any(skb);
@@ -2018,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 
        sky2->tx_cons = idx;
        smp_mb();
+
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+       u64_stats_update_begin(&sky2->tx_stats.syncp);
+       sky2->tx_stats.packets += pkts_compl;
+       sky2->tx_stats.bytes += bytes_compl;
+       u64_stats_update_end(&sky2->tx_stats.syncp);
 }
 
 static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -2040,6 +2061,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
 
        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+
+       sky2_read32(hw, B0_CTST);
 }
 
 static void sky2_hw_down(struct sky2_port *sky2)
@@ -2090,7 +2113,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
 }
 
 /* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static int sky2_close(struct net_device *dev)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -2101,15 +2124,22 @@ static int sky2_down(struct net_device *dev)
 
        netif_info(sky2, ifdown, dev, "disabling interface\n");
 
-       /* Disable port IRQ */
-       sky2_write32(hw, B0_IMSK,
-                    sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
-       sky2_read32(hw, B0_IMSK);
-
        if (hw->ports == 1) {
+               sky2_write32(hw, B0_IMSK, 0);
+               sky2_read32(hw, B0_IMSK);
+
                napi_disable(&hw->napi);
                free_irq(hw->pdev->irq, hw);
+               hw->flags &= ~SKY2_HW_IRQ_SETUP;
        } else {
+               u32 imask;
+
+               /* Disable port IRQ */
+               imask  = sky2_read32(hw, B0_IMSK);
+               imask &= ~portirq_msk[sky2->port];
+               sky2_write32(hw, B0_IMSK, imask);
+               sky2_read32(hw, B0_IMSK);
+
                synchronize_irq(hw->pdev->irq);
                napi_synchronize(&hw->napi);
        }
@@ -2587,7 +2617,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
        if (netif_running(dev)) {
                sky2_tx_complete(sky2, last);
 
-               /* Wake unless it's detached, and called e.g. from sky2_down() */
+               /* Wake unless it's detached, and called e.g. from sky2_close() */
                if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
                        netif_wake_queue(dev);
        }
@@ -3258,7 +3288,6 @@ static void sky2_reset(struct sky2_hw *hw)
            hw->chip_id == CHIP_ID_YUKON_PRM ||
            hw->chip_id == CHIP_ID_YUKON_OP_2) {
                u16 reg;
-               u32 msk;
 
                if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
                        /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
@@ -3281,11 +3310,6 @@ static void sky2_reset(struct sky2_hw *hw)
                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
 
-               /* enable PHY Quick Link */
-               msk = sky2_read32(hw, B0_IMSK);
-               msk |= Y2_IS_PHY_QLNK;
-               sky2_write32(hw, B0_IMSK, msk);
-
                /* check if PSMv2 was running before */
                reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
                if (reg & PCI_EXP_LNKCTL_ASPMC)
@@ -3383,7 +3407,7 @@ static void sky2_detach(struct net_device *dev)
                netif_tx_lock(dev);
                netif_device_detach(dev);       /* stop txq */
                netif_tx_unlock(dev);
-               sky2_down(dev);
+               sky2_close(dev);
        }
 }
 
@@ -3393,7 +3417,7 @@ static int sky2_reattach(struct net_device *dev)
        int err = 0;
 
        if (netif_running(dev)) {
-               err = sky2_up(dev);
+               err = sky2_open(dev);
                if (err) {
                        netdev_info(dev, "could not restart %d\n", err);
                        dev_close(dev);
@@ -3410,10 +3434,13 @@ static void sky2_all_down(struct sky2_hw *hw)
 {
        int i;
 
-       sky2_read32(hw, B0_IMSK);
-       sky2_write32(hw, B0_IMSK, 0);
-       synchronize_irq(hw->pdev->irq);
-       napi_disable(&hw->napi);
+       if (hw->flags & SKY2_HW_IRQ_SETUP) {
+               sky2_read32(hw, B0_IMSK);
+               sky2_write32(hw, B0_IMSK, 0);
+
+               synchronize_irq(hw->pdev->irq);
+               napi_disable(&hw->napi);
+       }
 
        for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
@@ -3446,11 +3473,12 @@ static void sky2_all_up(struct sky2_hw *hw)
                netif_wake_queue(dev);
        }
 
-       sky2_write32(hw, B0_IMSK, imask);
-       sky2_read32(hw, B0_IMSK);
-
-       sky2_read32(hw, B0_Y2_SP_LISR);
-       napi_enable(&hw->napi);
+       if (hw->flags & SKY2_HW_IRQ_SETUP) {
+               sky2_write32(hw, B0_IMSK, imask);
+               sky2_read32(hw, B0_IMSK);
+               sky2_read32(hw, B0_Y2_SP_LISR);
+               napi_enable(&hw->napi);
+       }
 }
 
 static void sky2_restart(struct work_struct *work)
@@ -3623,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
 {
        struct sky2_port *sky2 = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+               sizeof(info->bus_info));
 }
 
 static const struct sky2_stat {
@@ -4071,6 +4099,16 @@ static int sky2_set_coalesce(struct net_device *dev,
        return 0;
 }
 
+/*
+ * Hardware is limited to min of 128 and max of 2048 for ring size
+ * and  rounded up to next power of two
+ * to avoid division in modulus calclation
+ */
+static unsigned long roundup_ring_size(unsigned long pending)
+{
+       return max(128ul, roundup_pow_of_two(pending+1));
+}
+
 static void sky2_get_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ering)
 {
@@ -4098,7 +4136,7 @@ static int sky2_set_ringparam(struct net_device *dev,
 
        sky2->rx_pending = ering->rx_pending;
        sky2->tx_pending = ering->tx_pending;
-       sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
+       sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
 
        return sky2_reattach(dev);
 }
@@ -4281,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
        return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
 }
 
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        const struct sky2_port *sky2 = netdev_priv(dev);
        const struct sky2_hw *hw = sky2->hw;
@@ -4305,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_RXCSUM) {
-               u32 on = features & NETIF_F_RXCSUM;
+               bool on = features & NETIF_F_RXCSUM;
                sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
                             on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
@@ -4556,7 +4595,7 @@ static int sky2_device_event(struct notifier_block *unused,
        struct net_device *dev = ptr;
        struct sky2_port *sky2 = netdev_priv(dev);
 
-       if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
+       if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
                return NOTIFY_DONE;
 
        switch (event) {
@@ -4621,8 +4660,8 @@ static __exit void sky2_debug_cleanup(void)
    not allowing netpoll on second port */
 static const struct net_device_ops sky2_netdev_ops[2] = {
   {
-       .ndo_open               = sky2_up,
-       .ndo_stop               = sky2_down,
+       .ndo_open               = sky2_open,
+       .ndo_stop               = sky2_close,
        .ndo_start_xmit         = sky2_xmit_frame,
        .ndo_do_ioctl           = sky2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
@@ -4638,8 +4677,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
 #endif
   },
   {
-       .ndo_open               = sky2_up,
-       .ndo_stop               = sky2_down,
+       .ndo_open               = sky2_open,
+       .ndo_stop               = sky2_close,
        .ndo_start_xmit         = sky2_xmit_frame,
        .ndo_do_ioctl           = sky2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
@@ -4692,7 +4731,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
        spin_lock_init(&sky2->phy_lock);
 
        sky2->tx_pending = TX_DEF_PENDING;
-       sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
+       sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
        sky2->rx_pending = RX_DEF_PENDING;
 
        hw->dev[port] = dev;
index 0af31b8b5f106174b74c99cd124c758d95c71672..ff6f58bf822aa378ffa0975140733fa40bf41f5d 100644 (file)
@@ -2287,6 +2287,7 @@ struct sky2_hw {
 #define SKY2_HW_RSS_BROKEN     0x00000100
 #define SKY2_HW_VLAN_BROKEN     0x00000200
 #define SKY2_HW_RSS_CHKSUM     0x00000400      /* RSS requires chksum */
+#define SKY2_HW_IRQ_SETUP      0x00000800
 
        u8                   chip_id;
        u8                   chip_rev;
index d1aa45a158541b83168de0fcb8513c297ff32a6e..4a40ab967eeb806d508d14c79c04503dc26ac5e4 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index 45aea9c3ae2c3f0825771a9a216ee1ccfa5765d7..915e947b422d63888dfd65522b7d14a3fa4162ff 100644 (file)
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
 static int internal_err_reset = 1;
 module_param(internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
-                "Reset device on internal errors if non-zero (default 1)");
+                "Reset device on internal errors if non-zero"
+                " (default 1, in SRIOV mode default is 0)");
 
 static void dump_err_buf(struct mlx4_dev *dev)
 {
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        phys_addr_t addr;
 
+       /*If we are in SRIOV the default of the module param must be 0*/
+       if (mlx4_is_mfunc(dev))
+               internal_err_reset = 0;
+
        INIT_LIST_HEAD(&priv->catas_err.list);
        init_timer(&priv->catas_err.timer);
        priv->catas_err.map = NULL;
index 78f5a1a0b8c8ca806ebe8e7385c822885dbf29c1..978f593094c0402baa207d667c1c5b65a852f7c9 100644 (file)
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
 
 #include <asm/io.h>
 
 #include "mlx4.h"
+#include "fw.h"
 
 #define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK     0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
 
 enum {
        /* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
        int                     next;
        u64                     out_param;
        u16                     token;
+       u8                      fw_status;
 };
 
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr);
+
 static int mlx4_status_to_errno(u8 status)
 {
        static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
        return trans_table[status];
 }
 
+static u8 mlx4_errno_to_status(int errno)
+{
+       switch (errno) {
+       case -EPERM:
+               return CMD_STAT_BAD_OP;
+       case -EINVAL:
+               return CMD_STAT_BAD_PARAM;
+       case -ENXIO:
+               return CMD_STAT_BAD_SYS_STATE;
+       case -EBUSY:
+               return CMD_STAT_RESOURCE_BUSY;
+       case -ENOMEM:
+               return CMD_STAT_EXCEED_LIM;
+       case -ENFILE:
+               return CMD_STAT_ICM_ERROR;
+       default:
+               return CMD_STAT_INTERNAL_ERR;
+       }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 status = readl(&priv->mfunc.comm->slave_read);
+
+       return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 val;
+
+       priv->cmd.comm_toggle ^= 1;
+       val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+       __raw_writel((__force u32) cpu_to_be32(val),
+                    &priv->mfunc.comm->slave_write);
+       mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+                      unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       unsigned long end;
+       int err = 0;
+       int ret_from_pending = 0;
+
+       /* First, verify that the master reports correct status */
+       if (comm_pending(dev)) {
+               mlx4_warn(dev, "Communication channel is not idle."
+                         "my toggle is %d (cmd:0x%x)\n",
+                         priv->cmd.comm_toggle, cmd);
+               return -EAGAIN;
+       }
+
+       /* Write command */
+       down(&priv->cmd.poll_sem);
+       mlx4_comm_cmd_post(dev, cmd, param);
+
+       end = msecs_to_jiffies(timeout) + jiffies;
+       while (comm_pending(dev) && time_before(jiffies, end))
+               cond_resched();
+       ret_from_pending = comm_pending(dev);
+       if (ret_from_pending) {
+               /* check if the slave is trying to boot in the middle of
+                * FLR process. The only non-zero result in the RESET command
+                * is MLX4_DELAY_RESET_SLAVE*/
+               if ((MLX4_COMM_CMD_RESET == cmd)) {
+                       mlx4_warn(dev, "Got slave FLRed from Communication"
+                                 " channel (ret:0x%x)\n", ret_from_pending);
+                       err = MLX4_DELAY_RESET_SLAVE;
+               } else {
+                       mlx4_warn(dev, "Communication channel timed out\n");
+                       err = -ETIMEDOUT;
+               }
+       }
+
+       up(&priv->cmd.poll_sem);
+       return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+                             u16 param, unsigned long timeout)
+{
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       struct mlx4_cmd_context *context;
+       int err = 0;
+
+       down(&cmd->event_sem);
+
+       spin_lock(&cmd->context_lock);
+       BUG_ON(cmd->free_head < 0);
+       context = &cmd->context[cmd->free_head];
+       context->token += cmd->token_mask + 1;
+       cmd->free_head = context->next;
+       spin_unlock(&cmd->context_lock);
+
+       init_completion(&context->done);
+
+       mlx4_comm_cmd_post(dev, op, param);
+
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = context->result;
+       if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
+               goto out;
+       }
+
+out:
+       spin_lock(&cmd->context_lock);
+       context->next = cmd->free_head;
+       cmd->free_head = context - cmd->context;
+       spin_unlock(&cmd->context_lock);
+
+       up(&cmd->event_sem);
+       return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout)
+{
+       if (mlx4_priv(dev)->cmd.use_events)
+               return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+       return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
 static int cmd_pending(struct mlx4_dev *dev)
 {
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
        while (cmd_pending(dev)) {
-               if (time_after_eq(jiffies, end))
+               if (time_after_eq(jiffies, end)) {
+                       mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
                        goto out;
+               }
                cond_resched();
        }
 
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
-                                              op),                       hcr + 6);
+                                              op), hcr + 6);
 
        /*
         * Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
        return ret;
 }
 
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+                         int out_is_imm, u32 in_modifier, u8 op_modifier,
+                         u16 op, unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+       int ret;
+
+       down(&priv->cmd.slave_sem);
+       vhcr->in_param = cpu_to_be64(in_param);
+       vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+       vhcr->in_modifier = cpu_to_be32(in_modifier);
+       vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+       vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+       vhcr->status = 0;
+       vhcr->flags = !!(priv->cmd.use_events) << 6;
+       if (mlx4_is_master(dev)) {
+               ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while"
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = CMD_STAT_BAD_PARAM;
+                               }
+                       }
+                       ret = mlx4_status_to_errno(vhcr->status);
+               }
+       } else {
+               ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+                                   MLX4_COMM_TIME + timeout);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while"
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = CMD_STAT_BAD_PARAM;
+                               }
+                       }
+                       ret = mlx4_status_to_errno(vhcr->status);
+               } else
+                       mlx4_err(dev, "failed execution of VHCR_POST command"
+                                "opcode 0x%x\n", op);
+       }
+       up(&priv->cmd.slave_sem);
+       return ret;
+}
+
 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;
+       u32 stat;
 
        down(&priv->cmd.poll_sem);
 
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
-       err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
-                                              __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+       stat = be32_to_cpu((__force __be32)
+                          __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+       err = mlx4_status_to_errno(stat);
+       if (err)
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, stat);
 
 out:
        up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
        if (token != context->token)
                return;
 
+       context->fw_status = status;
        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;
 
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                      in_modifier, op_modifier, op, context->token, 1);
 
-       if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }
 
        err = context->result;
-       if (err)
+       if (err) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
                goto out;
+       }
 
        if (out_is_imm)
                *out_param = context->out_param;
@@ -311,17 +521,1046 @@ out:
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout)
+              u16 op, unsigned long timeout, int native)
 {
-       if (mlx4_priv(dev)->cmd.use_events)
-               return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
-       else
-               return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
+       if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+               if (mlx4_priv(dev)->cmd.use_events)
+                       return mlx4_cmd_wait(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+               else
+                       return mlx4_cmd_poll(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+       }
+       return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+                             in_modifier, op_modifier, op, timeout);
 }
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+                          int slave, u64 slave_addr,
+                          int size, int is_read)
+{
+       u64 in_param;
+       u64 out_param;
+
+       if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+           (slave & ~0x7f) | (size & 0xff)) {
+               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+                             "master_addr:0x%llx slave_id:%d size:%d\n",
+                             slave_addr, master_addr, slave, size);
+               return -EINVAL;
+       }
+
+       if (is_read) {
+               in_param = (u64) slave | slave_addr;
+               out_param = (u64) dev->caps.function | master_addr;
+       } else {
+               in_param = (u64) dev->caps.function | master_addr;
+               out_param = (u64) slave | slave_addr;
+       }
+
+       return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+                           MLX4_CMD_ACCESS_MEM,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+       out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+       if (cmd->encode_slave_id) {
+               in_param &= 0xffffffffffffff00ll;
+               in_param |= slave;
+       }
+
+       err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+                        vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+       if (cmd->out_is_imm)
+               vhcr->out_param = out_param;
+
+       return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+       {
+               .opcode = MLX4_CMD_QUERY_FW,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_HCA,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_DEV_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_ADAPTER,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_INIT_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_CLOSE_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm  = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CLOSE_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_PORT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_PORT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MAP_EQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MAP_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_EQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_NOP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_ALLOC_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ALLOC_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_FREE_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_FREE_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_MPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_MPT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_MPT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_READ_MTT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_WRITE_MTT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_WRITE_MTT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SYNC_TPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_CQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_CQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MODIFY_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MODIFY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_SRQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_SRQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_ARM_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ARM_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RST2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_RST2INIT_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2RTR_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT2RTR_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQERR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2ERR_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2SQD_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2SQD_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2RST_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_2RST_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_QP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_UNSUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_IF_STAT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_IF_STAT_wrapper
+       },
+       /* Native multicast commands are not available for guests */
+       {
+               .opcode = MLX4_CMD_QP_ATTACH,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QP_ATTACH_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_PROMISC,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_PROMISC_wrapper
+       },
+       /* Ethernet specific commands */
+       {
+               .opcode = MLX4_CMD_SET_VLAN_FLTR,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_MCAST_FLTR,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_DUMP_ETH_STATS,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INFORM_FLR_DONE,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+};
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_info *cmd = NULL;
+       struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+       struct mlx4_vhcr *vhcr;
+       struct mlx4_cmd_mailbox *inbox = NULL;
+       struct mlx4_cmd_mailbox *outbox = NULL;
+       u64 in_param;
+       u64 out_param;
+       int ret = 0;
+       int i;
+       int err = 0;
+
+       /* Create sw representation of Virtual HCR */
+       vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+       if (!vhcr)
+               return -ENOMEM;
+
+       /* DMA in the vHCR */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr_cmd),
+                                           MLX4_ACCESS_MEM_ALIGN), 1);
+               if (ret) {
+                       mlx4_err(dev, "%s: Failed reading vhcr "
+                                "ret: 0x%x\n", __func__, ret);
+                       kfree(vhcr);
+                       return ret;
+               }
+       }
+
+       /* Fill SW VHCR fields */
+       vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+       vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+       vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+       vhcr->token = be16_to_cpu(vhcr_cmd->token);
+       vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+       vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+       vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+       /* Lookup command */
+       for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+               if (vhcr->op == cmd_info[i].opcode) {
+                       cmd = &cmd_info[i];
+                       break;
+               }
+       }
+       if (!cmd) {
+               mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+                        vhcr->op, slave);
+               vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+               goto out_status;
+       }
+
+       /* Read inbox */
+       if (cmd->has_inbox) {
+               vhcr->in_param &= INBOX_MASK;
+               inbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(inbox)) {
+                       vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+                       inbox = NULL;
+                       goto out_status;
+               }
+
+               if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+                                   vhcr->in_param,
+                                   MLX4_MAILBOX_SIZE, 1)) {
+                       mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+                                __func__, cmd->opcode);
+                       vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+                       goto out_status;
+               }
+       }
+
+       /* Apply permission and bound checks if applicable */
+       if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+                         "checks for resource_id:%d\n", vhcr->op, slave,
+                         vhcr->in_modifier);
+               vhcr_cmd->status = CMD_STAT_BAD_OP;
+               goto out_status;
+       }
+
+       /* Allocate outbox */
+       if (cmd->has_outbox) {
+               outbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(outbox)) {
+                       vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+                       outbox = NULL;
+                       goto out_status;
+               }
+       }
+
+       /* Execute the command! */
+       if (cmd->wrapper) {
+               err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+                                  cmd);
+               if (cmd->out_is_imm)
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+       } else {
+               in_param = cmd->has_inbox ? (u64) inbox->dma :
+                       vhcr->in_param;
+               out_param = cmd->has_outbox ? (u64) outbox->dma :
+                       vhcr->out_param;
+               err = __mlx4_cmd(dev, in_param, &out_param,
+                                cmd->out_is_imm, vhcr->in_modifier,
+                                vhcr->op_modifier, vhcr->op,
+                                MLX4_CMD_TIME_CLASS_A,
+                                MLX4_CMD_NATIVE);
+
+               if (cmd->out_is_imm) {
+                       vhcr->out_param = out_param;
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+               }
+       }
+
+       if (err) {
+               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+                         " error:%d, status %d\n",
+                         vhcr->op, slave, vhcr->errno, err);
+               vhcr_cmd->status = mlx4_errno_to_status(err);
+               goto out_status;
+       }
+
+
+       /* Write outbox if command completed successfully */
+       if (cmd->has_outbox && !vhcr_cmd->status) {
+               ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+                                     vhcr->out_param,
+                                     MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+               if (ret) {
+                       /* If we failed to write back the outbox after the
+                        * command was successfully executed, we must fail this
+                        * slave, as it is now in undefined state */
+                       mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+                       goto out;
+               }
+       }
+
+out_status:
+       /* DMA back vhcr result */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr),
+                                           MLX4_ACCESS_MEM_ALIGN),
+                                     MLX4_CMD_WRAPPED);
+               if (ret)
+                       mlx4_err(dev, "%s:Failed writing vhcr result\n",
+                                __func__);
+               else if (vhcr->e_bit &&
+                        mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+                               mlx4_warn(dev, "Failed to generate command completion "
+                                         "eqe for slave %d\n", slave);
+       }
+
+out:
+       kfree(vhcr);
+       mlx4_free_cmd_mailbox(dev, inbox);
+       mlx4_free_cmd_mailbox(dev, outbox);
+       return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+                              u16 param, u8 toggle)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       u32 reply;
+       u32 slave_status = 0;
+       u8 is_going_down = 0;
+
+       slave_state[slave].comm_toggle ^= 1;
+       reply = (u32) slave_state[slave].comm_toggle << 31;
+       if (toggle != slave_state[slave].comm_toggle) {
+               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
+                         "STATE COMPROMISED ***\n", toggle, slave);
+               goto reset_slave;
+       }
+       if (cmd == MLX4_COMM_CMD_RESET) {
+               mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+               slave_state[slave].active = false;
+               /* check if we are in the middle of FLR process,
+                * if so return "retry" status to the slave */
+               if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+                       slave_status = MLX4_DELAY_RESET_SLAVE;
+                       goto inform_slave_state;
+               }
+
+               /* write the version in the event field */
+               reply |= mlx4_comm_get_version();
+
+               goto reset_slave;
+       }
+       /* command from slave in the middle of FLR */
+       if (cmd != MLX4_COMM_CMD_RESET &&
+           MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+               mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+                         "in the middle of FLR\n", slave, cmd);
+               return;
+       }
+
+       switch (cmd) {
+       case MLX4_COMM_CMD_VHCR0:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma = ((u64) param) << 48;
+               priv->mfunc.master.slave_state[slave].cookie = 0;
+               mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+               break;
+       case MLX4_COMM_CMD_VHCR1:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+               break;
+       case MLX4_COMM_CMD_VHCR2:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+               break;
+       case MLX4_COMM_CMD_VHCR_EN:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= param;
+               slave_state[slave].active = true;
+               break;
+       case MLX4_COMM_CMD_VHCR_POST:
+               if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+                   (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+                       goto reset_slave;
+               down(&priv->cmd.slave_sem);
+               if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+                       mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+                                " resetting slave.\n", slave);
+                       up(&priv->cmd.slave_sem);
+                       goto reset_slave;
+               }
+               up(&priv->cmd.slave_sem);
+               break;
+       default:
+               mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+               goto reset_slave;
+       }
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = cmd;
+       else
+               is_going_down = 1;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       if (is_going_down) {
+               mlx4_warn(dev, "Slave is going down aborting command(%d)"
+                         " executing from slave:%d\n",
+                         cmd, slave);
+               return;
+       }
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       mmiowb();
+
+       return;
+
+reset_slave:
+       /* cleanup any slave resources */
+       mlx4_delete_all_resources_for_slave(dev, slave);
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       /* with slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+       memset(&slave_state[slave].event_eq, 0,
+              sizeof(struct mlx4_slave_event_eq_info));
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work,
+                            struct mlx4_mfunc_master_ctx,
+                            comm_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       __be32 *bit_vec;
+       u32 comm_cmd;
+       u32 vec;
+       int i, j, slave;
+       int toggle;
+       int served = 0;
+       int reported = 0;
+       u32 slt;
+
+       bit_vec = master->comm_arm_bit_vector;
+       for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+               vec = be32_to_cpu(bit_vec[i]);
+               for (j = 0; j < 32; j++) {
+                       if (!(vec & (1 << j)))
+                               continue;
+                       ++reported;
+                       slave = (i * 32) + j;
+                       comm_cmd = swab32(readl(
+                                         &mfunc->comm[slave].slave_write));
+                       slt = swab32(readl(&mfunc->comm[slave].slave_read))
+                                    >> 31;
+                       toggle = comm_cmd >> 31;
+                       if (toggle != slt) {
+                               if (master->slave_state[slave].comm_toggle
+                                   != slt) {
+                                       printk(KERN_INFO "slave %d out of sync."
+                                              " read toggle %d, state toggle %d. "
+                                              "Resynching.\n", slave, slt,
+                                              master->slave_state[slave].comm_toggle);
+                                       master->slave_state[slave].comm_toggle =
+                                               slt;
+                               }
+                               mlx4_master_do_cmd(dev, slave,
+                                                  comm_cmd >> 16 & 0xff,
+                                                  comm_cmd & 0xffff, toggle);
+                               ++served;
+                       }
+               }
+       }
+
+       if (reported && reported != served)
+               mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+                         " but %d were served\n",
+                         reported, served);
+
+       if (mlx4_ARM_COMM_CHANNEL(dev))
+               mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int wr_toggle;
+       int rd_toggle;
+       unsigned long end;
+
+       wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+       end = jiffies + msecs_to_jiffies(5000);
+
+       while (time_before(jiffies, end)) {
+               rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+               if (rd_toggle == wr_toggle) {
+                       priv->cmd.comm_toggle = rd_toggle;
+                       return 0;
+               }
+
+               cond_resched();
+       }
+
+       /*
+        * we could reach here if for example the previous VM using this
+        * function misbehaved and left the channel with unsynced state. We
+        * should fix this here and give this VM a chance to use a properly
+        * synced channel
+        */
+       mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+       priv->cmd.comm_toggle = 0;
+
+       return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i, err, port;
+
+       priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                           &priv->mfunc.vhcr_dma,
+                                           GFP_KERNEL);
+       if (!priv->mfunc.vhcr) {
+               mlx4_err(dev, "Couldn't allocate vhcr.\n");
+               return -ENOMEM;
+       }
+
+       if (mlx4_is_master(dev))
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+                       priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+       else
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, 2) +
+                       MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+       if (!priv->mfunc.comm) {
+               mlx4_err(dev, "Couldn't map communication vector.\n");
+               goto err_vhcr;
+       }
+
+       if (mlx4_is_master(dev)) {
+               priv->mfunc.master.slave_state =
+                       kzalloc(dev->num_slaves *
+                               sizeof(struct mlx4_slave_state), GFP_KERNEL);
+               if (!priv->mfunc.master.slave_state)
+                       goto err_comm;
+
+               for (i = 0; i < dev->num_slaves; ++i) {
+                       s_state = &priv->mfunc.master.slave_state[i];
+                       s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_write);
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_read);
+                       mmiowb();
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+                               s_state->vlan_filter[port] =
+                                       kzalloc(sizeof(struct mlx4_vlan_fltr),
+                                               GFP_KERNEL);
+                               if (!s_state->vlan_filter[port]) {
+                                       if (--port)
+                                               kfree(s_state->vlan_filter[port]);
+                                       goto err_slaves;
+                               }
+                               INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+                       }
+                       spin_lock_init(&s_state->lock);
+               }
+
+               memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+               priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+               INIT_WORK(&priv->mfunc.master.comm_work,
+                         mlx4_master_comm_channel);
+               INIT_WORK(&priv->mfunc.master.slave_event_work,
+                         mlx4_gen_slave_eqe);
+               INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+                         mlx4_master_handle_slave_flr);
+               spin_lock_init(&priv->mfunc.master.slave_state_lock);
+               priv->mfunc.master.comm_wq =
+                       create_singlethread_workqueue("mlx4_comm");
+               if (!priv->mfunc.master.comm_wq)
+                       goto err_slaves;
+
+               if (mlx4_init_resource_tracker(dev))
+                       goto err_thread;
+
+               sema_init(&priv->cmd.slave_sem, 1);
+               err = mlx4_ARM_COMM_CHANNEL(dev);
+               if (err) {
+                       mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+                                err);
+                       goto err_resource;
+               }
+
+       } else {
+               err = sync_toggles(dev);
+               if (err) {
+                       mlx4_err(dev, "Couldn't sync toggles\n");
+                       goto err_comm;
+               }
+
+               sema_init(&priv->cmd.slave_sem, 1);
+       }
+       return 0;
+
+err_resource:
+       mlx4_free_resource_tracker(dev);
+err_thread:
+       flush_workqueue(priv->mfunc.master.comm_wq);
+       destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+       while (--i) {
+               for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                       kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+       }
+       kfree(priv->mfunc.master.slave_state);
+err_comm:
+       iounmap(priv->mfunc.comm);
+err_vhcr:
+       dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                            priv->mfunc.vhcr,
+                                            priv->mfunc.vhcr_dma);
+       priv->mfunc.vhcr = NULL;
+       return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;
 
-       priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-                               MLX4_HCR_SIZE);
-       if (!priv->cmd.hcr) {
-               mlx4_err(dev, "Couldn't map command register.");
-               return -ENOMEM;
+       priv->cmd.hcr = NULL;
+       priv->mfunc.vhcr = NULL;
+
+       if (!mlx4_is_slave(dev)) {
+               priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+                                       MLX4_HCR_BASE, MLX4_HCR_SIZE);
+               if (!priv->cmd.hcr) {
+                       mlx4_err(dev, "Couldn't map command register.\n");
+                       return -ENOMEM;
+               }
        }
 
        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
-       if (!priv->cmd.pool) {
-               iounmap(priv->cmd.hcr);
-               return -ENOMEM;
-       }
+       if (!priv->cmd.pool)
+               goto err_hcr;
 
        return 0;
+
+err_hcr:
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
+       return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i, port;
+
+       if (mlx4_is_master(dev)) {
+               flush_workqueue(priv->mfunc.master.comm_wq);
+               destroy_workqueue(priv->mfunc.master.comm_wq);
+               for (i = 0; i < dev->num_slaves; i++) {
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                               kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+               }
+               kfree(priv->mfunc.master.slave_state);
+               iounmap(priv->mfunc.comm);
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                                    priv->mfunc.vhcr,
+                                                    priv->mfunc.vhcr_dma);
+               priv->mfunc.vhcr = NULL;
+       }
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        pci_pool_destroy(priv->cmd.pool);
-       iounmap(priv->cmd.hcr);
+
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
 }
 
 /*
@@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
+       int err = 0;
 
        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                   sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
                ; /* nothing */
        --priv->cmd.token_mask;
 
-       priv->cmd.use_events = 1;
-
        down(&priv->cmd.poll_sem);
+       priv->cmd.use_events = 1;
 
-       return 0;
+       return err;
 }
 
 /*
@@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
 
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+                          struct mlx4_cmd_mailbox *mailbox)
 {
        if (!mailbox)
                return;
@@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
        kfree(mailbox);
 }
 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
index 499a5168892aa045399f38b11197cc9e21e6d1dd..475f9d6af9552b1c31c29bbcabc112653499d89e 100644 (file)
@@ -34,9 +34,9 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
-#include <linux/gfp.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/cq.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_cq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       __be32                  logsize_usrpage;
-       __be16                  cq_period;
-       __be16                  cq_max_count;
-       u8                      reserved2[3];
-       u8                      comp_eqn;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  last_notified_index;
-       __be32                  solicit_producer_index;
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved4[2];
-       __be64                  db_rec_addr;
-};
-
 #define MLX4_CQ_STATUS_OK              ( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
 #define MLX4_CQ_STATUS_WRITE_FAIL      (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
        cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                               cqn & (dev->caps.num_cqs - 1));
        if (!cq) {
-               mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+               mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }
 
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+                       MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num, u32 opmod)
 {
        return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+                           cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+       int err;
+
+       *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+       if (*cqn == -1)
+               return -ENOMEM;
+
+       err = mlx4_table_get(dev, &cq_table->table, *cqn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+       if (err)
+               goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+       mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+       return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+               else {
+                       *cqn = get_param_l(&out_param);
+                       return 0;
+               }
+       }
+       return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+       mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+       mlx4_table_put(dev, &cq_table->table, cqn);
+       mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cqn);
+               err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+       } else
+               __mlx4_cq_free_icm(dev, cqn);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
                  unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 
        cq->vector = vector;
 
-       cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-       if (cq->cqn == -1)
-               return -ENOMEM;
-
-       err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+       err = mlx4_cq_alloc_icm(dev, &cq->cqn);
        if (err)
-               goto err_put;
+               return err;
 
        spin_lock_irq(&cq_table->lock);
        err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
        spin_unlock_irq(&cq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
        radix_tree_delete(&cq_table->tree, cq->cqn);
        spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+       mlx4_cq_free_icm(dev, cq->cqn);
 
        return err;
 }
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
                complete(&cq->free);
        wait_for_completion(&cq->free);
 
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+       mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
                               dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        /* Nothing to do to clean up radix_tree */
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }
index 227997d775e858b08a7cde4e92036738c803acc9..00b81272e31404eb47a59bccd3d9012565df2f26 100644 (file)
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
        int err;
 
        cq->size = entries;
-       if (mode == RX)
-               cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-       else
-               cq->buf_size = sizeof(struct mlx4_cqe);
+       cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 
        cq->ring = ring;
        cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                cq->size = priv->rx_ring[cq->ring].actual_size;
 
        err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
-                           cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+                           cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
        if (err)
                return err;
 
@@ -147,6 +144,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
+       cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
 }
index 74e2a2a8a02bb2e5b9af9d5bbaee623ac1b2e139..7dbc6a2307798a164b0e7f7520bcbfcb88634aa0 100644 (file)
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
 
-       strncpy(drvinfo->driver, DRV_NAME, 32);
-       strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
-       sprintf(drvinfo->fw_version, "%d.%d.%d",
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d",
                (u16) (mdev->dev->caps.fw_ver >> 32),
                (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
                (u16) (mdev->dev->caps.fw_ver & 0xffff));
-       strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        int err = 0;
        u64 config = 0;
+       u64 mask;
 
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+       if ((priv->port < 1) || (priv->port > 2)) {
+               en_err(priv, "Failed to get WoL information\n");
+               return;
+       }
+
+       mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+               MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+       if (!(priv->mdev->dev->caps.flags & mask)) {
                wol->supported = 0;
                wol->wolopts = 0;
                return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        u64 config = 0;
        int err = 0;
+       u64 mask;
+
+       if ((priv->port < 1) || (priv->port > 2))
+               return -EOPNOTSUPP;
+
+       mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+               MLX4_DEV_CAP_FLAG_WOL_PORT2;
 
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+       if (!(priv->mdev->dev->caps.flags & mask))
                return -EOPNOTSUPP;
 
        if (wol->supported & ~WAKE_MAGIC)
index 78d776bc355c747d37fafa77a90117daead68ec3..72fa807b69ce1581ac62dea599ce121e19e74f9c 100644 (file)
@@ -45,7 +45,7 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                en_err(priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);
 
+       return 0;
 }
 
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
+
+       return 0;
 }
 
 u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_replace_mac(mdev->dev, priv->port,
-                                      priv->base_qpn, priv->mac, 0);
+                                      priv->base_qpn, priv->mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        kfree(priv->mc_addrs);
+       priv->mc_addrs = NULL;
        priv->mc_addrs_cnt = 0;
 }
 
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+       mlx4_en_clear_list(dev);
        priv->mc_addrs = mc_addrs;
        priv->mc_addrs_cnt = mc_addrs_cnt;
 }
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
                goto out;
        }
 
+       if (!netif_carrier_ok(dev)) {
+               if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+                       if (priv->port_state.link_state) {
+                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+                               netif_carrier_on(dev);
+                               en_dbg(LINK, priv, "Link Up\n");
+                       }
+               }
+       }
+
        /*
         * Promsicuous mode: disable all filters
         */
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
                ++rx_index;
        }
 
-       /* Set port mac number */
-       en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-       err = mlx4_register_mac(mdev->dev, priv->port,
-                               priv->mac, &priv->base_qpn, 0);
+       /* Set qp number */
+       en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+       err = mlx4_get_eth_qp(mdev->dev, priv->port,
+                               priv->mac, &priv->base_qpn);
        if (err) {
-               en_err(priv, "Failed setting port mac\n");
+               en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
        }
        mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@ tx_err:
 
        mlx4_en_release_rss_steer(priv);
 mac_err:
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
 cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
-       /* Unregister Mac address for the port */
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-       mdev->mac_removed[priv->port] = 1;
-
        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);
 
+       /* Unregister Mac address for the port */
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+       mdev->mac_removed[priv->port] = 1;
+
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +989,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+static int mlx4_en_set_features(struct net_device *netdev,
+               netdev_features_t features)
+{
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+       if (features & NETIF_F_LOOPBACK)
+               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       else
+               priv->ctrl_flags &=
+                       cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+       return 0;
+
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
@@ -990,6 +1020,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
+       .ndo_set_features       = mlx4_en_set_features,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1053,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->port = port;
        priv->port_up = false;
        priv->flags = prof->flags;
+       priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+                       MLX4_WQE_CTRL_SOLICITED);
        priv->tx_ring_num = prof->tx_ring_num;
        priv->rx_ring_num = prof->rx_ring_num;
        priv->mac_index = -1;
@@ -1088,6 +1121,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->features = dev->hw_features | NETIF_F_HIGHDMA |
                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                        NETIF_F_HW_VLAN_FILTER;
+       dev->hw_features |= NETIF_F_LOOPBACK;
 
        mdev->pndev[port] = dev;
 
index 03c84cd78cdee36a8178d82ea40c6dab528ca522..331791467a221c327104dbd9ae1ac53a2a950a88 100644 (file)
 #include "mlx4_en.h"
 
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-                       u64 mac, u64 clear, u8 mode)
-{
-       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
                filter->entry[i] = cpu_to_be32(entry);
        }
        err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
-                      MLX4_CMD_TIME_CLASS_B);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_general_context *context;
-       int err;
-       u32 in_mod;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->flags = SET_PORT_GEN_ALL_VALID;
-       context->mtu = cpu_to_be16(mtu);
-       context->pptx = (pptx * (!pfctx)) << 7;
-       context->pfctx = pfctx;
-       context->pprx = (pprx * (!pfcrx)) << 7;
-       context->pfcrx = pfcrx;
-
-       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-                          u8 promisc)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_rqp_calc_context *context;
-       int err;
-       u32 in_mod;
-       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
-                                               MCAST_DIRECT : MCAST_DEFAULT;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
-                       dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
-               return 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->base_qpn = cpu_to_be32(base_qpn);
-       context->n_mac = dev->caps.log_num_macs;
-       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
-                                      base_qpn);
-       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
-                                    base_qpn);
-       context->intra_no_vlan = 0;
-       context->no_vlan = MLX4_NO_VLAN_IDX;
-       context->intra_vlan_miss = 0;
-       context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
-       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*qport_context));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
-                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
        qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
-                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
 
index 19eb244f516534a9993264336c3fe714245ef9fd..6934fd7e66ed766c987aff56ffbe6661e5667b3f 100644 (file)
 #define SET_PORT_PROMISC_SHIFT 31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
-enum {
-       MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-       MLX4_CMD_SET_MCAST_FLTR = 0x48,
-       MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
-       MCAST_DIRECT_ONLY       = 0,
-       MCAST_DIRECT            = 1,
-       MCAST_DEFAULT           = 2
-};
-
-struct mlx4_set_port_general_context {
-       u8 reserved[3];
-       u8 flags;
-       u16 reserved2;
-       __be16 mtu;
-       u8 pptx;
-       u8 pfctx;
-       u16 reserved3;
-       u8 pprx;
-       u8 pfcrx;
-       u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
-       __be32 base_qpn;
-       u8 rererved;
-       u8 n_mac;
-       u8 n_vlan;
-       u8 n_prio;
-       u8 reserved2[3];
-       u8 mac_miss;
-       u8 intra_no_vlan;
-       u8 no_vlan;
-       u8 intra_vlan_miss;
-       u8 vlan_miss;
-       u8 reserved3[3];
-       u8 no_vlan_prio;
-       __be32 promisc;
-       __be32 mcast;
-};
-
 #define VLAN_FLTR_SIZE 128
 struct mlx4_set_vlan_fltr_mbox {
        __be32 entry[VLAN_FLTR_SIZE];
index 0dfb4ec8a9dd09295de01eb422b68511ab891c5e..bcbc54c16947eac20f840f929f5d9ea3a5be3f91 100644 (file)
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        struct mlx4_en_dev *mdev = priv->mdev;
 
        memset(context, 0, sizeof *context);
-       context->flags = cpu_to_be32(7 << 16 | rss << 13);
+       context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
        context->pd = cpu_to_be32(mdev->priv_pdn);
        context->mtu_msgmax = 0xff;
        if (!is_tx && !rss)
index b89c36dbf5b3ea7927fb46252819210bdd55ec26..e8d6ad2dce0afaaa2ffd27017238cd0482334039 100644 (file)
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        unsigned int length;
        int polled = 0;
        int ip_summed;
+       struct ethhdr *ethh;
+       u64 s_mac;
 
        if (!priv->port_up)
                return 0;
@@ -577,10 +579,24 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        goto next;
                }
 
+               /* Get pointer to first fragment since we haven't skb yet and
+                * cast it to ethhdr struct */
+               ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+                                        skb_frags[0].offset);
+               s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+               /* If source MAC is equal to our own MAC and not performing
+                * the selftest or flb disabled - drop the packet */
+               if (s_mac == priv->mac &&
+                       (!(dev->features & NETIF_F_LOOPBACK) ||
+                        !priv->validate_loopback))
+                       goto next;
+
                /*
                 * Packet is OK - process it.
                 */
                length = be32_to_cpu(cqe->byte_cnt);
+               length -= ring->fcs_del;
                ring->bytes += length;
                ring->packets++;
 
@@ -813,8 +829,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
        /* Cancel FCS removal if FW allows */
-       if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+       if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
                context->param3 |= cpu_to_be32(1 << 29);
+               ring->fcs_del = ETH_FCS_LEN;
+       } else
+               ring->fcs_del = 0;
 
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
        if (err) {
@@ -833,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
-       struct mlx4_en_rss_context *rss_context;
+       struct mlx4_rss_context *rss_context;
        void *ptr;
-       u8 rss_mask = 0x3f;
+       u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+                       MLX4_RSS_TCP_IPV6);
        int i, qpn;
        int err = 0;
        int good_qps = 0;
@@ -873,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0].cqn, &context);
 
-       ptr = ((void *) &context) + 0x3c;
+       ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+                                       + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
        rss_context = ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
                                            (rss_map->base_qpn));
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+       if (priv->mdev->profile.udp_rss) {
+               rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+               rss_context->base_qpn_udp = rss_context->default_qpn;
+       }
        rss_context->flags = rss_mask;
-       rss_context->hash_fn = 1;
+       rss_context->hash_fn = MLX4_RSS_HASH_TOP;
        for (i = 0; i < 10; i++)
                rss_context->rss_key[i] = rsskey[i];
 
-       if (priv->mdev->profile.udp_rss)
-               rss_context->base_qpn_udp = rss_context->default_qpn;
        err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
                               &rss_map->indir_qp, &rss_map->indir_state);
        if (err)
index 9fdbcecd499dad25b70c23bc4369e7009ce5eb87..bf2e5d3f177c21ed599b0768276201512946106c 100644 (file)
@@ -43,7 +43,7 @@
 static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
 {
        return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
index d901b4267537b8e9d614f57068513a95748d6e22..9ef9038d0629972315a77a3019a8d565395761d0 100644 (file)
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
-
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-       struct mlx4_cqe *cqe = cq->buf;
+       struct mlx4_cqe *cqe;
        u16 index;
-       u16 new_index;
+       u16 new_index, ring_index;
        u32 txbbs_skipped = 0;
-       u32 cq_last_sav;
-
-       /* index always points to the first TXBB of the last polled descriptor */
-       index = ring->cons & ring->size_mask;
-       new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       if (index == new_index)
-               return;
+       u32 cons_index = mcq->cons_index;
+       int size = cq->size;
+       u32 size_mask = ring->size_mask;
+       struct mlx4_cqe *buf = cq->buf;
 
        if (!priv->port_up)
                return;
 
-       /*
-        * We use a two-stage loop:
-        * - the first samples the HW-updated CQE
-        * - the second frees TXBBs until the last sample
-        * This lets us amortize CQE cache misses, while still polling the CQ
-        * until is quiescent.
-        */
-       cq_last_sav = mcq->cons_index;
-       do {
+       index = cons_index & size_mask;
+       cqe = &buf[index];
+       ring_index = ring->cons & size_mask;
+
+       /* Process all completed CQEs */
+       while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+                       cons_index & size)) {
+               /*
+                * make sure we read the CQE after we read the
+                * ownership bit
+                */
+               rmb();
+
+               /* Skip over last polled CQE */
+               new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
                do {
-                       /* Skip over last polled CQE */
-                       index = (index + ring->last_nr_txbb) & ring->size_mask;
                        txbbs_skipped += ring->last_nr_txbb;
-
-                       /* Poll next CQE */
+                       ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+                       /* free next descriptor */
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
-                                               priv, ring, index,
-                                               !!((ring->cons + txbbs_skipped) &
-                                                  ring->size));
-                       ++mcq->cons_index;
-
-               } while (index != new_index);
+                                       priv, ring, ring_index,
+                                       !!((ring->cons + txbbs_skipped) &
+                                                       ring->size));
+               } while (ring_index != new_index);
+
+               ++cons_index;
+               index = cons_index & size_mask;
+               cqe = &buf[index];
+       }
 
-               new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       } while (index != new_index);
-       AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
-                        (u32) (mcq->cons_index - cq_last_sav));
 
        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
+       mcq->cons_index = cons_index;
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
                inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
        }
        tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+               (!!vlan_tx_tag_present(skb));
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Prepare ctrl segement apart opcode+ownership, which depends on
         * whether LSO is used */
        tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+               !!vlan_tx_tag_present(skb);
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
-       tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
-                                               MLX4_WQE_CTRL_SOLICITED);
+       tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                                                         MLX4_WQE_CTRL_TCP_UDP_CSUM);
                ring->tx_csum++;
        }
 
-       if (unlikely(priv->validate_loopback)) {
-               /* Copy dst mac address to wqe */
-               skb_reset_mac_header(skb);
-               ethh = eth_hdr(skb);
-               if (ethh && ethh->h_dest) {
-                       mac = mlx4_en_mac_to_u64(ethh->h_dest);
-                       mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
-                       mac_l = (u32) (mac & 0xffffffff);
-                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
-                       tx_desc->ctrl.imm = cpu_to_be32(mac_l);
-               }
+       /* Copy dst mac address to wqe */
+       skb_reset_mac_header(skb);
+       ethh = eth_hdr(skb);
+       if (ethh && ethh->h_dest) {
+               mac = mlx4_en_mac_to_u64(ethh->h_dest);
+               mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+               mac_l = (u32) (mac & 0xffffffff);
+               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+               tx_desc->ctrl.imm = cpu_to_be32(mac_l);
        }
 
        /* Handle LSO (TSO) packets */
index 24ee96775996dc459c03c62b537eb120538f7201..1e9b55eb7217a8c725c8e39b2880855e7f88c04a 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       u8                      log_eq_size;
-       u8                      reserved2[4];
-       u8                      eq_period;
-       u8                      reserved3;
-       u8                      eq_max_count;
-       u8                      reserved4[3];
-       u8                      intr;
-       u8                      log_page_size;
-       u8                      reserved5[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       u32                     reserved6[2];
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved7[4];
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
-                              (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
-       u8                      reserved1;
-       u8                      type;
-       u8                      reserved2;
-       u8                      subtype;
-       union {
-               u32             raw[6];
-               struct {
-                       __be32  cqn;
-               } __packed comp;
-               struct {
-                       u16     reserved1;
-                       __be16  token;
-                       u32     reserved2;
-                       u8      reserved3[3];
-                       u8      status;
-                       __be64  out_param;
-               } __packed cmd;
-               struct {
-                       __be32  qpn;
-               } __packed qp;
-               struct {
-                       __be32  srqn;
-               } __packed srq;
-               struct {
-                       __be32  cqn;
-                       u32     reserved1;
-                       u8      reserved2[3];
-                       u8      syndrome;
-               } __packed cq_err;
-               struct {
-                       u32     reserved1[2];
-                       __be32  port;
-               } __packed port_change;
-       }                       event;
-       u8                      reserved3[3];
-       u8                      owner;
-} __packed;
+                              (1ull << MLX4_EVENT_TYPE_CMD)                | \
+                              (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
+                              (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+       struct mlx4_eqe *eqe =
+               &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+       return (!!(eqe->owner & 0x80) ^
+               !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+               eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+       struct mlx4_eqe *eqe;
+       u8 slave;
+       int i;
+
+       for (eqe = next_slave_event_eqe(slave_eq); eqe;
+             eqe = next_slave_event_eqe(slave_eq)) {
+               slave = eqe->slave_id;
+
+               /* All active slaves need to receive the event */
+               if (slave == ALL_SLAVES) {
+                       for (i = 0; i < dev->num_slaves; i++) {
+                               if (i != dev->caps.function &&
+                                   master->slave_state[i].active)
+                                       if (mlx4_GEN_EQE(dev, i, eqe))
+                                               mlx4_warn(dev, "Failed to "
+                                                         " generate event "
+                                                         "for slave %d\n", i);
+                       }
+               } else {
+                       if (mlx4_GEN_EQE(dev, slave, eqe))
+                               mlx4_warn(dev, "Failed to generate event "
+                                              "for slave %d\n", slave);
+               }
+               ++slave_eq->cons;
+       }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+       struct mlx4_eqe *s_eqe =
+               &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+       if ((!!(s_eqe->owner & 0x80)) ^
+           (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+                         "No free EQE on slave events queue\n", slave);
+               return;
+       }
+
+       memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+       s_eqe->slave_id = slave;
+       /* ensure all information is written before setting the ownersip bit */
+       wmb();
+       s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+       ++slave_eq->prod;
+
+       queue_work(priv->mfunc.master.comm_wq,
+                  &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+                            struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave =
+               &priv->mfunc.master.slave_state[slave];
+
+       if (!s_slave->active) {
+               /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+               return;
+       }
+
+       slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_flr_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       int i;
+       int err;
+
+       mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+       for (i = 0 ; i < dev->num_slaves; i++) {
+
+               if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+                       mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+                                "clean slave: %d\n", i);
+
+                       mlx4_delete_all_resources_for_slave(dev, i);
+                       /*return the slave to running mode*/
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+                       slave_state[i].is_slave_going_down = 0;
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       /*notify the FW:*/
+                       err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+                                      MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+                       if (err)
+                               mlx4_warn(dev, "Failed to notify FW on "
+                                         "FLR done (slave:%d)\n", i);
+               }
+       }
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
+       int slave = 0;
+       int ret;
+       u32 flr_slave;
+       u8 update_slave_state;
+       int i;
 
        while ((eqe = next_eqe_sw(eq))) {
                /*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-                                     eqe->type);
+                       mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the QP */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_QP,
+                                               be32_to_cpu(eqe->event.qp.qpn)
+                                               & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "QP event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                "not get slave id (%d)\n",
+                                                eqe->type, eqe->subtype,
+                                                eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+
+                       }
+                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+                                     0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
+                       mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+                                 __func__);
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-                                     eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the SRQ */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_SRQ,
+                                               be32_to_cpu(eqe->event.srq.srqn)
+                                               & 0xffffff,
+                                               &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_warn(dev, "SRQ event %02x(%02x) "
+                                                 "on EQ %d at index %u: could"
+                                                 " not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+                                         " event: %02x(%02x)\n", __func__,
+                                         slave,
+                                         be32_to_cpu(eqe->event.srq.srqn),
+                                         eqe->type, eqe->subtype);
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_warn(dev, "%s: sending event "
+                                                 "%02x(%02x) to slave:%d\n",
+                                                  __func__, eqe->type,
+                                                 eqe->subtype, slave);
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+                                      0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+                               if (mlx4_is_master(dev))
+                                       /*change the state of all slave's port
+                                       * to down:*/
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               mlx4_dbg(dev, "%s: Sending "
+                                                        "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+                                                        " to slave: %d, port:%d\n",
+                                                        __func__, i, port);
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
                        } else {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+                               if (mlx4_is_master(dev)) {
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
+                               }
                        }
                        break;
 
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-                       mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                       if (mlx4_is_master(dev)) {
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                       RES_CQ,
+                                       be32_to_cpu(eqe->event.cq_err.cqn)
+                                       & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "CQ event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                 "not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_cq_event(dev,
+                                     be32_to_cpu(eqe->event.cq_err.cqn)
+                                     & 0xffffff,
                                      eqe->type);
                        break;
 
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;
 
+               case MLX4_EVENT_TYPE_COMM_CHANNEL:
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Received comm channel event "
+                                              "for non master device\n");
+                               break;
+                       }
+                       memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+                              eqe->event.comm_channel_arm.bit_vec,
+                              sizeof eqe->event.comm_channel_arm.bit_vec);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.comm_work);
+                       break;
+
+               case MLX4_EVENT_TYPE_FLR_EVENT:
+                       flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Non-master function received"
+                                              "FLR event\n");
+                               break;
+                       }
+
+                       mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+                       if (flr_slave > dev->num_slaves) {
+                               mlx4_warn(dev,
+                                         "Got FLR for unknown function: %d\n",
+                                         flr_slave);
+                               update_slave_state = 0;
+                       } else
+                               update_slave_state = 1;
+
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       if (update_slave_state) {
+                               priv->mfunc.master.slave_state[flr_slave].active = false;
+                               priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+                               priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+                       }
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.slave_flr_event_work);
+                       break;
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-                                 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+                                 "index %u. owner=%x, nent=0x%x, slave=%x, "
+                                 "ownership=%s\n",
+                                 eqe->type, eqe->subtype, eq->eqn,
+                                 eq->cons_index, eqe->owner, eq->nent,
+                                 eqe->slave_id,
+                                 !!(eqe->owner & 0x80) ^
+                                 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
-               }
+               };
 
                ++eq->cons_index;
                eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
        return IRQ_HANDLED;
 }
 
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq =
+               &priv->mfunc.master.slave_state[slave].event_eq;
+       u32 in_modifier = vhcr->in_modifier;
+       u32 eqn = in_modifier & 0x1FF;
+       u64 in_param =  vhcr->in_param;
+       int err = 0;
+
+       if (slave == dev->caps.function)
+               err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+                              0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                              MLX4_CMD_NATIVE);
+       if (!err) {
+               if (in_modifier >> 31) {
+                       /* unmap */
+                       event_eq->event_type &= ~in_param;
+               } else {
+                       event_eq->eqn = eqn;
+                       event_eq->event_type = in_param;
+               }
+       }
+       return err;
+}
+
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                        int eq_num)
 {
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
-                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+                       MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+                           0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;
 
-       err = mlx4_map_clr_int(dev);
-       if (err)
-               goto err_out_bitmap;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_map_clr_int(dev);
+               if (err)
+                       goto err_out_bitmap;
 
-       priv->eq_table.clr_mask =
-               swab32(1 << (priv->eq_table.inta_pin & 31));
-       priv->eq_table.clr_int  = priv->clr_base +
-               (priv->eq_table.inta_pin < 32 ? 4 : 0);
+               priv->eq_table.clr_mask =
+                       swab32(1 << (priv->eq_table.inta_pin & 31));
+               priv->eq_table.clr_int  = priv->clr_base +
+                       (priv->eq_table.inta_pin < 32 ? 4 : 0);
+       }
 
        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);
 
 err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
 
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
        err = mlx4_NOP(dev);
        /* When not in MSI_X, there is only one irq to check */
-       if (!(dev->flags & MLX4_FLAG_MSI_X))
+       if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
                return err;
 
        /* A loop over all completion vectors, for each vector we will check
index 435ca6e49734b3df44d213a64ff9028e7e912d1f..a424a19280cc466eefba7ab08236f6306e505e12 100644 (file)
@@ -32,6 +32,7 @@
  * SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include <linux/mlx4/cmd.h>
 #include <linux/module.h>
 #include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static int enable_qos;
+static bool enable_qos;
 module_param(enable_qos, bool, 0444);
 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
 
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
        MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       u8      field;
+       u32     size;
+       int     err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET            0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET                0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET         0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET          0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET         0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET         0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET                0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET                0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET                0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET                0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET           0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET                0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET                0xc
+
+       if (vhcr->op_modifier == 1) {
+               field = vhcr->in_modifier;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+               field = 0; /* ensure fvl bit is not set */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+       } else if (vhcr->op_modifier == 0) {
+               field = 1 << 7; /* enable only ethernet interface */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+               field = slave;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+               field = dev->caps.num_ports;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+               size = 0; /* no PF behavious is set for now */
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+               size = dev->caps.num_qps;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+               size = dev->caps.num_srqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_cqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+               size = dev->caps.reserved_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+               size = dev->caps.num_mpts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mtts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mgms + dev->caps.num_amgms;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+       } else
+               err = -EINVAL;
+
+       return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32                     *outbox;
+       u8                      field;
+       u32                     size;
+       int                     i;
+       int                     err = 0;
+
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+       if (err)
+               goto out;
+
+       outbox = mailbox->buf;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+       if (!(field & (1 << 7))) {
+               mlx4_err(dev, "The host doesn't support eth interface\n");
+               err = -EPROTONOSUPPORT;
+               goto out;
+       }
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+       func_cap->function = field;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+       func_cap->num_ports = field;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+       func_cap->pf_context_behaviour = size;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+       func_cap->qp_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+       func_cap->srq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+       func_cap->cq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+       func_cap->max_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+       func_cap->reserved_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+       func_cap->mpt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+       func_cap->mtt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+       func_cap->mcg_quota = size & 0xFFFFFF;
+
+       for (i = 1; i <= func_cap->num_ports; ++i) {
+               err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+                                  MLX4_CMD_QUERY_FUNC_CAP,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       goto out;
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+               if (field & (1 << 7)) {
+                       mlx4_err(dev, "VLAN is enforced on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               if (field & (1 << 6)) {
+                       mlx4_err(dev, "Force mac is enabled on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+               func_cap->physical_port[i] = field;
+       }
+
+       /* All other resources are allocated by the master, but we still report
+        * 'num' and 'reserved' capabilities as follows:
+        * - num remains the maximum resource index
+        * - 'num - reserved' is the total available objects of a resource, but
+        *   resource indices may be less than 'reserved'
+        * TODO: set per-resource quotas */
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
        if (err)
                goto out;
 
@@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                for (i = 1; i <= dev_cap->num_ports; ++i) {
                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B);
+                                          MLX4_CMD_TIME_CLASS_B,
+                                          !mlx4_is_slave(dev));
                        if (err)
                                goto out;
 
                        MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
                        dev_cap->supported_port_types[i] = field & 3;
+                       dev_cap->suggested_type[i] = (field >> 3) & 1;
+                       dev_cap->default_sense[i] = (field >> 4) & 1;
                        MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
                        dev_cap->ib_mtu[i]         = field & 0xf;
                        MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,61 @@ out:
        return err;
 }
 
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       u64 def_mac;
+       u8 port_type;
+       int err;
+
+#define MLX4_PORT_SUPPORT_IB           (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE         (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE                (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK     (0xff & ~MLX4_PORT_SUPPORT_IB & \
+                                        ~MLX4_PORT_SUGGEST_TYPE & \
+                                        ~MLX4_PORT_DEFAULT_SENSE)
+
+       err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_NATIVE);
+
+       if (!err && dev->caps.function != slave) {
+               /* set slave default_mac address */
+               MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+               def_mac += slave << 8;
+               MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+               /* get port type - currently only eth is enabled */
+               MLX4_GET(port_type, outbox->buf,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+               /* Allow only Eth port, no link sensing allowed */
+               port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+               /* check eth is enabled for this port */
+               if (!(port_type & 2))
+                       mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+               MLX4_PUT(outbox->buf, port_type,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+       }
+
+       return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+       struct mlx4_cmd_mailbox *outbox = ptr;
+
+       return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                           MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +751,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
                        if (++nent == MLX4_MAILBOX_SIZE / 16) {
                                err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
-                                               MLX4_CMD_TIME_CLASS_B);
+                                               MLX4_CMD_TIME_CLASS_B,
+                                               MLX4_CMD_NATIVE);
                                if (err)
                                        goto out;
                                nent = 0;
@@ -528,7 +761,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
        }
 
        if (nent)
-               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+                              MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -557,13 +791,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 
 int mlx4_RUN_FW(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +815,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 
 #define QUERY_FW_OUT_SIZE             0x100
 #define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_PPF_ID                       0x09
 #define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
 #define QUERY_FW_MAX_CMD_OFFSET        0x0f
 #define QUERY_FW_ERR_START_OFFSET      0x30
@@ -589,13 +826,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 
+#define QUERY_FW_COMM_BASE_OFFSET      0x40
+#define QUERY_FW_COMM_BAR_OFFSET       0x48
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -608,6 +848,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);
 
+       MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+       dev->caps.function = lg;
+
        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +892,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
        MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
        fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 
+       MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+       MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
+       fw->comm_bar = (fw->comm_bar >> 6) * 2;
+       mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+                fw->comm_bar, fw->comm_base);
        mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 
        /*
@@ -711,7 +959,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -743,6 +991,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define         INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
 #define         INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
 #define         INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
+#define         INIT_HCA_EQE_CQE_OFFSETS        (INIT_HCA_QPC_OFFSET + 0x38)
 #define         INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
 #define         INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
 #define         INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1080,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 
        /* UAR attributes */
 
-       MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_PUT(inbox, param->uar_page_sz,     INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
 
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+                      MLX4_CMD_NATIVE);
 
        if (err)
                mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1093,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
        return err;
 }
 
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+                  struct mlx4_init_hca_param *param)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
+       int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET   0x04
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+                          MLX4_CMD_QUERY_HCA,
+                          MLX4_CMD_TIME_CLASS_B,
+                          !mlx4_is_slave(dev));
+       if (err)
+               goto out;
+
+       MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+       MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+       MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+       MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+       MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+       MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+       /* multicast attributes */
+
+       MLX4_GET(param->mc_base,         outbox, INIT_HCA_MC_BASE_OFFSET);
+       MLX4_GET(param->log_mc_entry_sz, outbox,
+                INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+       MLX4_GET(param->log_mc_hash_sz,  outbox,
+                INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+       MLX4_GET(param->log_mc_table_sz, outbox,
+                INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+       /* TPT attributes */
+
+       MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+       /* UAR attributes */
+
+       MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
+       if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+
+       /* Enable port only if it was previously disabled */
+       if (!priv->mfunc.master.init_port_ref[port]) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+               priv->mfunc.master.slave_state[slave].init_port_mask |=
+                       (1 << port);
+       }
+       ++priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1231,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
                MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
                err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
                mlx4_free_cmd_mailbox(dev, mailbox);
        } else
                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
 
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
+       if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+           (1 << port)))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+       if (priv->mfunc.master.init_port_ref[port] == 1) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                              MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+       }
+       priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+       --priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+                       MLX4_CMD_NATIVE);
 }
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
        int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
                               MLX4_CMD_SET_ICM_SIZE,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (ret)
                return ret;
 
@@ -929,7 +1303,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
        /* Input modifier of 0x1f means "finish as soon as possible." */
-       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
 #define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1312,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
-                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_read);
 
@@ -947,6 +1322,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
-                                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_write);
index bf5ec2286528ca9bbf239c126dd3f706c5b2a55b..119e0cc9fab3d852259e7f4df0651382511f9d68 100644 (file)
@@ -111,11 +111,30 @@ struct mlx4_dev_cap {
        u64 max_icm_sz;
        int max_gso_sz;
        u8  supported_port_types[MLX4_MAX_PORTS + 1];
+       u8  suggested_type[MLX4_MAX_PORTS + 1];
+       u8  default_sense[MLX4_MAX_PORTS + 1];
        u8  log_max_macs[MLX4_MAX_PORTS + 1];
        u8  log_max_vlans[MLX4_MAX_PORTS + 1];
        u32 max_counters;
 };
 
+struct mlx4_func_cap {
+       u8      function;
+       u8      num_ports;
+       u8      flags;
+       u32     pf_context_behaviour;
+       int     qp_quota;
+       int     cq_quota;
+       int     srq_quota;
+       int     mpt_quota;
+       int     mtt_quota;
+       int     max_eq;
+       int     reserved_eq;
+       int     mcg_quota;
+       u8      physical_port[MLX4_MAX_PORTS + 1];
+       u8      port_flags[MLX4_MAX_PORTS + 1];
+};
+
 struct mlx4_adapter {
        char board_id[MLX4_BOARD_ID_LEN];
        u8   inta_pin;
@@ -133,6 +152,7 @@ struct mlx4_init_hca_param {
        u64 dmpt_base;
        u64 cmpt_base;
        u64 mtt_base;
+       u64 global_caps;
        u16 log_mc_entry_sz;
        u16 log_mc_hash_sz;
        u8  log_num_qps;
@@ -143,6 +163,7 @@ struct mlx4_init_hca_param {
        u8  log_mc_table_sz;
        u8  log_mpt_sz;
        u8  log_uar_sz;
+       u8  uar_page_sz; /* log pg sz in 4k chunks */
 };
 
 struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
index 02393fdf44c17e57cbf0fd775a86a20fc8f75a5a..a9ade1c3cad50dd1b0c7da2a2b497e857ffc1b3f 100644 (file)
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
index ca6feb55bd94721ac4755bf3d57495a74245bb09..b4e9f6f5cc04ef41ec5f87acbd24187a74b58aa9 100644 (file)
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
                mlx4_add_device(intf, priv);
 
        mutex_unlock(&intf_mutex);
-       mlx4_start_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_start_catas_poll(dev);
 
        return 0;
 }
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;
 
-       mlx4_stop_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_stop_catas_poll(dev);
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 94bbc85a532d18d2fd67caf5406bcd2a6baac26d..6bb62c580e2d50eab48565e170650b25efac00c9 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/delay.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+                       mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
+                                        " of qp per mcg, for example:"
+                                        " 10 gives 248.range: 9<="
+                                        " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF                                        (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK            0
+#define PF_CONTEXT_BEHAVIOUR_MASK      0
+
 static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static struct mlx4_profile default_profile = {
-       .num_qp         = 1 << 17,
+       .num_qp         = 1 << 18,
        .num_srq        = 1 << 16,
        .rdmarc_per_qp  = 1 << 4,
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
-       .num_mpt        = 1 << 17,
+       .num_mpt        = 1 << 19,
        .num_mtt        = 1 << 20,
 };
 
-static int log_num_mac = 2;
+static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
 
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 /* Log2 max number of VLANs per ETH port (0-7) */
 #define MLX4_LOG_NUM_VLANS 7
 
-static int use_prio;
+static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                  "(0/1, default 0)");
 
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
 
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
+                               "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+       struct list_head list;
+       enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+       struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+       return dev->caps.reserved_eqs +
+               MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
 {
        int i;
 
-       dev->caps.port_mask = 0;
        for (i = 1; i <= dev->caps.num_ports; ++i)
-               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
-                       dev->caps.port_mask |= 1 << (i - 1);
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
 }
 
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
                dev->caps.def_mac[i]        = dev_cap->def_mac[i];
                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+               dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+               dev->caps.default_sense[i] = dev_cap->default_sense[i];
                dev->caps.trans_type[i]     = dev_cap->trans_type[i];
                dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
                dev->caps.wavelength[i]     = dev_cap->wavelength[i];
                dev->caps.trans_code[i]     = dev_cap->trans_code[i];
        }
 
+       dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
-       dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
+       dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
-       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
-       dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                   dev->caps.mtts_per_seg);
+       dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
-       dev->caps.reserved_uars      = dev_cap->reserved_uars;
+
+       /* The first 128 UARs are used for EQ doorbells */
+       dev->caps.reserved_uars      = max_t(int, 128, dev_cap->reserved_uars);
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
        dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->reserved_xrcds : 0;
        dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->max_xrcds : 0;
-       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+       dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
+
        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
 
+       /* Sense port always allowed on supported devices for ConnectX1 and 2 */
+       if (dev->pdev->device != 0x1003)
+               dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
        dev->caps.log_num_prios = use_prio ? 3 : 0;
 
        for (i = 1; i <= dev->caps.num_ports; ++i) {
-               if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
-               else
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
-               dev->caps.possible_type[i] = dev->caps.port_type[i];
+               dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+               if (dev->caps.supported_type[i]) {
+                       /* if only ETH is supported - assign ETH */
+                       if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       /* if only IB is supported,
+                        * assign IB only if SRIOV is off*/
+                       else if (dev->caps.supported_type[i] ==
+                                MLX4_PORT_TYPE_IB) {
+                               if (dev->flags & MLX4_FLAG_SRIOV)
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_NONE;
+                               else
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_IB;
+                       /* if IB and ETH are supported,
+                        * first of all check if SRIOV is on */
+                       } else if (dev->flags & MLX4_FLAG_SRIOV)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       else {
+                               /* In non-SRIOV mode, we set the port type
+                                * according to user selection of port type,
+                                * if usere selected none, take the FW hint */
+                               if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+                                       dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+                                               MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+                               else
+                                       dev->caps.port_type[i] = port_type_array[i-1];
+                       }
+               }
+               /*
+                * Link sensing is allowed on the port if 3 conditions are true:
+                * 1. Both protocols are supported on the port.
+                * 2. Different types are supported on the port
+                * 3. FW declared that it supports link sensing
+                */
                mlx4_priv(dev)->sense.sense_allowed[i] =
-                       dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+                       ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+                        (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+                        (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+               /*
+                * If "default_sense" bit is set, we move the port to "AUTO" mode
+                * and perform sense_port FW command to try and set the correct
+                * port type from beginning
+                */
+               if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+                       enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+                       dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+                       mlx4_SENSE_PORT(dev, i, &sensed_port);
+                       if (sensed_port != MLX4_PORT_TYPE_NONE)
+                               dev->caps.port_type[i] = sensed_port;
+               } else {
+                       dev->caps.possible_type[i] = dev->caps.port_type[i];
+               }
 
                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
-       mlx4_set_port_mask(dev);
-
        dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
 
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        return 0;
 }
+/*The function checks if there are live vf, return the num of them*/
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i;
+       int ret = 0;
+
+       for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+               s_state = &priv->mfunc.master.slave_state[i];
+               if (s_state->active && s_state->last_cmd !=
+                   MLX4_COMM_CMD_RESET) {
+                       mlx4_warn(dev, "%s: slave: %d is still active\n",
+                                 __func__, i);
+                       ret++;
+               }
+       }
+       return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave;
+
+       if (!mlx4_is_master(dev))
+               return 0;
+
+       s_slave = &priv->mfunc.master.slave_state[slave];
+       return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+       int                        err;
+       u32                        page_size;
+       struct mlx4_dev_cap        dev_cap;
+       struct mlx4_func_cap       func_cap;
+       struct mlx4_init_hca_param hca_param;
+       int                        i;
+
+       memset(&hca_param, 0, sizeof(hca_param));
+       err = mlx4_QUERY_HCA(dev, &hca_param);
+       if (err) {
+               mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+               return err;
+       }
+
+       /*fail if the hca has an unknown capability */
+       if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+           HCA_GLOBAL_CAP_MASK) {
+               mlx4_err(dev, "Unknown hca global capabilities\n");
+               return -ENOSYS;
+       }
+
+       mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+       memset(&dev_cap, 0, sizeof(dev_cap));
+       err = mlx4_dev_cap(dev, &dev_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       page_size = ~dev->caps.page_size_cap + 1;
+       mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+       if (page_size > PAGE_SIZE) {
+               mlx4_err(dev, "HCA minimum page size of %d bigger than "
+                        "kernel PAGE_SIZE of %ld, aborting.\n",
+                        page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       /* slave gets uar page size from QUERY_HCA fw command */
+       dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+       /* TODO: relax this assumption */
+       if (dev->caps.uar_page_size != PAGE_SIZE) {
+               mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+                        dev->caps.uar_page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       memset(&func_cap, 0, sizeof(func_cap));
+       err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+           PF_CONTEXT_BEHAVIOUR_MASK) {
+               mlx4_err(dev, "Unknown pf context behaviour\n");
+               return -ENOSYS;
+       }
+
+       dev->caps.function              = func_cap.function;
+       dev->caps.num_ports             = func_cap.num_ports;
+       dev->caps.num_qps               = func_cap.qp_quota;
+       dev->caps.num_srqs              = func_cap.srq_quota;
+       dev->caps.num_cqs               = func_cap.cq_quota;
+       dev->caps.num_eqs               = func_cap.max_eq;
+       dev->caps.reserved_eqs          = func_cap.reserved_eq;
+       dev->caps.num_mpts              = func_cap.mpt_quota;
+       dev->caps.num_mtts              = func_cap.mtt_quota;
+       dev->caps.num_pds               = MLX4_NUM_PDS;
+       dev->caps.num_mgms              = 0;
+       dev->caps.num_amgms             = 0;
+
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+       if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+                        "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+               return -ENODEV;
+       }
+
+       if (dev->caps.uar_page_size * (dev->caps.num_uars -
+                                      dev->caps.reserved_uars) >
+                                      pci_resource_len(dev->pdev, 2)) {
+               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+                        "PCI resource 2 size of 0x%llx, aborting.\n",
+                        dev->caps.uar_page_size * dev->caps.num_uars,
+                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+               return -ENODEV;
+       }
+
+#if 0
+       mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+       mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+                 dev->caps.num_uars, dev->caps.reserved_uars,
+                 dev->caps.uar_page_size * dev->caps.num_uars,
+                 pci_resource_len(dev->pdev, 2));
+       mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+                 dev->caps.reserved_eqs);
+       mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+                 dev->caps.num_pds, dev->caps.reserved_pds,
+                 dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+       return 0;
+}
 
 /*
  * Change the port configuration of the device.
@@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev,
                        types[i] = mdev->caps.port_type[i+1];
        }
 
-       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+           !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
                for (i = 1; i <= mdev->caps.num_ports; i++) {
                        if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
                                mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
+       int num_eqs;
 
        err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
                                  cmpt_base +
@@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
        if (err)
                goto err_srq;
 
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+                                 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
        if (err)
                goto err_cq;
 
@@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 aux_pages;
+       int num_eqs;
        int err;
 
        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                goto err_unmap_aux;
        }
 
+
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs,
-                                 0, 0);
+                                 num_eqs, num_eqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
@@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
        err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
                                  init_hca->mtt_base,
                                  dev->caps.mtt_entry_sz,
-                                 dev->caps.num_mtt_segs,
+                                 dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
         * and it's a lot easier than trying to track ref counts.
         */
        err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
-                                 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+                                 init_hca->mc_base,
+                                 mlx4_get_mgm_entry_size(dev),
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
@@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       down(&priv->cmd.slave_sem);
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+               mlx4_warn(dev, "Failed to close slave function.\n");
+       up(&priv->cmd.slave_sem);
+}
+
 static int map_bf_area(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev)
        resource_size_t bf_len;
        int err = 0;
 
-       bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
-       bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+       bf_start = pci_resource_start(dev->pdev, 2) +
+                       (dev->caps.num_uars << PAGE_SHIFT);
+       bf_len = pci_resource_len(dev->pdev, 2) -
+                       (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
                err = -ENOMEM;
@@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
        unmap_bf_area(dev);
-       mlx4_CLOSE_HCA(dev, 0);
-       mlx4_free_icms(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       if (mlx4_is_slave(dev))
+               mlx4_slave_exit(dev);
+       else {
+               mlx4_CLOSE_HCA(dev, 0);
+               mlx4_free_icms(dev);
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u64 dma = (u64) priv->mfunc.vhcr_dma;
+       int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+       int ret_from_reset = 0;
+       u32 slave_read;
+       u32 cmd_channel_ver;
+
+       down(&priv->cmd.slave_sem);
+       priv->cmd.max_cmds = 1;
+       mlx4_warn(dev, "Sending reset\n");
+       ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+                                      MLX4_COMM_TIME);
+       /* if we are in the middle of flr the slave will try
+        * NUM_OF_RESET_RETRIES times before leaving.*/
+       if (ret_from_reset) {
+               if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+                       msleep(SLEEP_TIME_IN_RESET);
+                       while (ret_from_reset && num_of_reset_retries) {
+                               mlx4_warn(dev, "slave is currently in the"
+                                         "middle of FLR. retrying..."
+                                         "(try num:%d)\n",
+                                         (NUM_OF_RESET_RETRIES -
+                                          num_of_reset_retries  + 1));
+                               ret_from_reset =
+                                       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+                                                     0, MLX4_COMM_TIME);
+                               num_of_reset_retries = num_of_reset_retries - 1;
+                       }
+               } else
+                       goto err;
+       }
+
+       /* check the driver version - the slave I/F revision
+        * must match the master's */
+       slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+       cmd_channel_ver = mlx4_comm_get_version();
+
+       if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+               MLX4_COMM_GET_IF_REV(slave_read)) {
+               mlx4_err(dev, "slave driver version is not supported"
+                        " by the master\n");
+               goto err;
+       }
+
+       mlx4_warn(dev, "Sending vhcr0\n");
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+               goto err;
+       up(&priv->cmd.slave_sem);
+       return 0;
+
+err:
+       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+       up(&priv->cmd.slave_sem);
+       return -EIO;
 }
 
 static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        u64 icm_size;
        int err;
 
-       err = mlx4_QUERY_FW(dev);
-       if (err) {
-               if (err == -EACCES)
-                       mlx4_info(dev, "non-primary physical function, skipping.\n");
-               else
-                       mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-               return err;
-       }
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_QUERY_FW(dev);
+               if (err) {
+                       if (err == -EACCES)
+                               mlx4_info(dev, "non-primary physical function, skipping.\n");
+                       else
+                               mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_load_fw(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to start FW, aborting.\n");
-               return err;
-       }
+               err = mlx4_load_fw(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to start FW, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       mlx4_cfg.log_pg_sz_m = 1;
-       mlx4_cfg.log_pg_sz = 0;
-       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-       if (err)
-               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+               mlx4_cfg.log_pg_sz_m = 1;
+               mlx4_cfg.log_pg_sz = 0;
+               err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+               if (err)
+                       mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
 
-       err = mlx4_dev_cap(dev, &dev_cap);
-       if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-               goto err_stop_fw;
-       }
+               err = mlx4_dev_cap(dev, &dev_cap);
+               if (err) {
+                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+                       goto err_stop_fw;
+               }
 
-       profile = default_profile;
+               profile = default_profile;
 
-       icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-       if ((long long) icm_size < 0) {
-               err = icm_size;
-               goto err_stop_fw;
-       }
+               icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+                                            &init_hca);
+               if ((long long) icm_size < 0) {
+                       err = icm_size;
+                       goto err_stop_fw;
+               }
 
-       if (map_bf_area(dev))
-               mlx4_dbg(dev, "Failed to map blue flame area\n");
+               init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               init_hca.uar_page_sz = PAGE_SHIFT - 12;
 
-       init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+               if (err)
+                       goto err_stop_fw;
 
-       err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-       if (err)
-               goto err_stop_fw;
+               err = mlx4_INIT_HCA(dev, &init_hca);
+               if (err) {
+                       mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+                       goto err_free_icm;
+               }
+       } else {
+               err = mlx4_init_slave(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize slave\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_INIT_HCA(dev, &init_hca);
-       if (err) {
-               mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-               goto err_free_icm;
+               err = mlx4_slave_cap(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to obtain slave caps\n");
+                       goto err_close;
+               }
        }
 
+       if (map_bf_area(dev))
+               mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+       /* Only the master sets the ports; all the rest get them from it. */
+       if (!mlx4_is_slave(dev))
+               mlx4_set_port_mask(dev);
+
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        return 0;
 
 err_close:
-       mlx4_CLOSE_HCA(dev, 0);
+       mlx4_close_hca(dev);
 
 err_free_icm:
-       mlx4_free_icms(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_free_icms(dev);
 
 err_stop_fw:
+       if (!mlx4_is_slave(dev)) {
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+       }
+unmap_bf:
        unmap_bf_area(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
        return err;
 }
 
@@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_srq_table_free;
        }
 
-       err = mlx4_init_mcg_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "multicast group table, aborting.\n");
-               goto err_qp_table_free;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_mcg_table(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize "
+                                "multicast group table, aborting.\n");
+                       goto err_qp_table_free;
+               }
        }
 
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-               goto err_counters_table_free;
+               goto err_mcg_table_free;
        }
 
-       for (port = 1; port <= dev->caps.num_ports; port++) {
-               enum mlx4_port_type port_type = 0;
-               mlx4_SENSE_PORT(dev, port, &port_type);
-               if (port_type)
-                       dev->caps.port_type[port] = port_type;
-               ib_port_default_caps = 0;
-               err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d default "
-                                 "ib capabilities (%d). Continuing with "
-                                 "caps = 0\n", port, err);
-               dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
-               err = mlx4_check_ext_port_caps(dev, port);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d extended "
-                                 "port capabilities support info (%d)."
-                                 " Assuming not supported\n", port, err);
+       if (!mlx4_is_slave(dev)) {
+               for (port = 1; port <= dev->caps.num_ports; port++) {
+                       ib_port_default_caps = 0;
+                       err = mlx4_get_port_ib_caps(dev, port,
+                                                   &ib_port_default_caps);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d default "
+                                         "ib capabilities (%d). Continuing "
+                                         "with caps = 0\n", port, err);
+                       dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+                       err = mlx4_check_ext_port_caps(dev, port);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d extended "
+                                         "port capabilities support info (%d)."
+                                         " Assuming not supported\n",
+                                         port, err);
 
-               err = mlx4_SET_PORT(dev, port);
-               if (err) {
-                       mlx4_err(dev, "Failed to set port %d, aborting\n",
-                               port);
-                       goto err_mcg_table_free;
+                       err = mlx4_SET_PORT(dev, port);
+                       if (err) {
+                               mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                       port);
+                               goto err_counters_table_free;
+                       }
                }
        }
-       mlx4_set_port_mask(dev);
 
        return 0;
 
-err_mcg_table_free:
-       mlx4_cleanup_mcg_table(dev);
-
 err_counters_table_free:
        mlx4_cleanup_counters_table(dev);
 
+err_mcg_table_free:
+       mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
        mlx4_cleanup_qp_table(dev);
 
@@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
        int i;
 
        if (msi_x) {
-               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-                            nreq);
+       /* In multifunction mode each function gets 2 msi-X vectors:
+        * one for data path completions and the other for async events
+        * or command completions */
+               if (mlx4_is_mfunc(dev)) {
+                       nreq = 2;
+               } else {
+                       nreq = min_t(int, dev->caps.num_eqs -
+                                    dev->caps.reserved_eqs, nreq);
+               }
+
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                        goto no_msi;
@@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 
        info->dev = dev;
        info->port = port;
-       mlx4_init_mac_table(dev, &info->mac_table);
-       mlx4_init_vlan_table(dev, &info->vlan_table);
-       info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+       if (!mlx4_is_slave(dev)) {
+               INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+               mlx4_init_mac_table(dev, &info->mac_table);
+               mlx4_init_vlan_table(dev, &info->vlan_table);
+               info->base_qpn =
+                       dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
                        (port - 1) * (1 << log_num_mac);
+       }
 
        sprintf(info->dev_name, "mlx4_port%d", port);
        info->port_attr.attr.name = info->dev_name;
-       info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (mlx4_is_mfunc(dev))
+               info->port_attr.attr.mode = S_IRUGO;
+       else {
+               info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+               info->port_attr.store     = set_port_type;
+       }
        info->port_attr.show      = show_port_type;
-       info->port_attr.store     = set_port_type;
        sysfs_attr_init(&info->port_attr.attr);
 
        err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
        kfree(priv->steer);
 }
 
+static int extended_func_num(struct pci_dev *pdev)
+{
+       return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
+
+#define MLX4_OWNER_BASE        0x8069c
+#define MLX4_OWNER_SIZE        4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+       u32 ret;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return -ENOMEM;
+       }
+
+       ret = readl(owner);
+       iounmap(owner);
+       return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return;
+       }
+       writel(0, owner);
+       msleep(1000);
+       iounmap(owner);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                        "aborting.\n");
                return err;
        }
-
+       if (num_vfs > MLX4_MAX_NUM_VF) {
+               printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+                      num_vfs, MLX4_MAX_NUM_VF);
+               return -EINVAL;
+       }
        /*
-        * Check for BARs.  We expect 0: 1MB
+        * Check for BARs.
         */
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-           pci_resource_len(pdev, 0) != 1 << 20) {
-               dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+       if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+           !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Missing DCS, aborting."
+                       "(id == 0X%p, id->driver_data: 0x%lx,"
+                       " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+                       id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        mutex_init(&priv->bf_mutex);
 
        dev->rev_id = pdev->revision;
+       /* Detect if this device is a virtual function */
+       if (id && id->driver_data & MLX4_VF) {
+               /* When acting as pf, we normally skip vfs unless explicitly
+                * requested to probe them. */
+               if (num_vfs && extended_func_num(pdev) > probe_vf) {
+                       mlx4_warn(dev, "Skipping virtual function:%d\n",
+                                               extended_func_num(pdev));
+                       err = -ENODEV;
+                       goto err_free_dev;
+               }
+               mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+               dev->flags |= MLX4_FLAG_SLAVE;
+       } else {
+               /* We reset the device and enable SRIOV only for physical
+                * devices.  Try to claim ownership on the device;
+                * if already taken, skip -- do not allow multiple PFs */
+               err = mlx4_get_ownership(dev);
+               if (err) {
+                       if (err < 0)
+                               goto err_free_dev;
+                       else {
+                               mlx4_warn(dev, "Multiple PFs not yet supported."
+                                         " Skipping PF.\n");
+                               err = -EINVAL;
+                               goto err_free_dev;
+                       }
+               }
 
-       /*
-        * Now reset the HCA before we touch the PCI capabilities or
-        * attempt a firmware command, since a boot ROM may have left
-        * the HCA in an undefined state.
-        */
-       err = mlx4_reset(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-               goto err_free_dev;
+               if (num_vfs) {
+                       mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+                       err = pci_enable_sriov(pdev, num_vfs);
+                       if (err) {
+                               mlx4_err(dev, "Failed to enable sriov,"
+                                        "continuing without sriov enabled"
+                                        " (err = %d).\n", err);
+                               num_vfs = 0;
+                               err = 0;
+                       } else {
+                               mlx4_warn(dev, "Running in master mode\n");
+                               dev->flags |= MLX4_FLAG_SRIOV |
+                                             MLX4_FLAG_MASTER;
+                               dev->num_vfs = num_vfs;
+                       }
+               }
+
+               /*
+                * Now reset the HCA before we touch the PCI capabilities or
+                * attempt a firmware command, since a boot ROM may have left
+                * the HCA in an undefined state.
+                */
+               err = mlx4_reset(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+                       goto err_rel_own;
+               }
        }
 
+slave_start:
        if (mlx4_cmd_init(dev)) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
-               goto err_free_dev;
+               goto err_sriov;
+       }
+
+       /* In slave functions, the communication channel must be initialized
+        * before posting commands. Also, init num_slaves before calling
+        * mlx4_init_hca */
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_is_master(dev))
+                       dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+               else {
+                       dev->num_slaves = 0;
+                       if (mlx4_multi_func_init(dev)) {
+                               mlx4_err(dev, "Failed to init slave mfunc"
+                                        " interface, aborting.\n");
+                               goto err_cmd;
+                       }
+               }
        }
 
        err = mlx4_init_hca(dev);
-       if (err)
-               goto err_cmd;
+       if (err) {
+               if (err == -EACCES) {
+                       /* Not primary Physical function
+                        * Running in slave mode */
+                       mlx4_cmd_cleanup(dev);
+                       dev->flags |= MLX4_FLAG_SLAVE;
+                       dev->flags &= ~MLX4_FLAG_MASTER;
+                       goto slave_start;
+               } else
+                       goto err_mfunc;
+       }
+
+       /* In master functions, the communication channel must be initialized
+        * after obtaining its address from fw */
+       if (mlx4_is_master(dev)) {
+               if (mlx4_multi_func_init(dev)) {
+                       mlx4_err(dev, "Failed to init master mfunc"
+                                "interface, aborting.\n");
+                       goto err_close;
+               }
+       }
 
        err = mlx4_alloc_eq_table(dev);
        if (err)
-               goto err_close;
+               goto err_master_mfunc;
 
        priv->msix_ctl.pool_bm = 0;
        spin_lock_init(&priv->msix_ctl.pool_lock);
 
        mlx4_enable_msi_x(dev);
-
-       err = mlx4_init_steering(dev);
-       if (err)
+       if ((mlx4_is_mfunc(dev)) &&
+           !(dev->flags & MLX4_FLAG_MSI_X)) {
+               mlx4_err(dev, "INTx is not supported in multi-function mode."
+                        " aborting.\n");
                goto err_free_eq;
+       }
+
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_steering(dev);
+               if (err)
+                       goto err_free_eq;
+       }
 
        err = mlx4_setup_hca(dev);
-       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+           !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@ err_port:
        mlx4_cleanup_uar_table(dev);
 
 err_steer:
-       mlx4_clear_steering(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_clear_steering(dev);
 
 err_free_eq:
        mlx4_free_eq_table(dev);
 
+err_master_mfunc:
+       if (mlx4_is_master(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_close:
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);
 
        mlx4_close_hca(dev);
 
+err_mfunc:
+       if (mlx4_is_slave(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_cmd:
        mlx4_cmd_cleanup(dev);
 
+err_sriov:
+       if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+               pci_disable_sriov(pdev);
+
+err_rel_own:
+       if (!mlx4_is_slave(dev))
+               mlx4_free_ownership(dev);
+
 err_free_dev:
        kfree(priv);
 
@@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        int p;
 
        if (dev) {
+               /* in SRIOV it is not allowed to unload the pf's
+                * driver while there are alive vf's */
+               if (mlx4_is_master(dev)) {
+                       if (mlx4_how_many_lives_vf(dev))
+                               printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+               }
                mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);
 
@@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);
 
+               if (mlx4_is_master(dev))
+                       mlx4_free_resource_tracker(dev);
+
                iounmap(priv->kar);
                mlx4_uar_free(dev, &priv->driver_uar);
                mlx4_cleanup_uar_table(dev);
-               mlx4_clear_steering(dev);
+               if (!mlx4_is_slave(dev))
+                       mlx4_clear_steering(dev);
                mlx4_free_eq_table(dev);
+               if (mlx4_is_master(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_close_hca(dev);
+               if (mlx4_is_slave(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_cmd_cleanup(dev);
 
                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);
+               if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+                       mlx4_warn(dev, "Disabling sriov\n");
+                       pci_disable_sriov(pdev);
+               }
 
+               if (!mlx4_is_slave(dev))
+                       mlx4_free_ownership(dev);
                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
 }
 
 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
-       { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-       { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-       { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-       { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-       { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-       { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
-       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+       /* MT25408 "Hermon" SDR */
+       { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+       /* MT25408 "Hermon" DDR */
+       { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+       /* MT25408 "Hermon" QDR */
+       { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+       /* MT25408 "Hermon" DDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+       /* MT25408 "Hermon" QDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+       /* MT25408 "Hermon" EN 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+       /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+       /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+       { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+       /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+       { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+       /* MT26478 ConnectX2 40GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+       /* MT25400 Family [ConnectX-2 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+       /* MT27500 Family [ConnectX-3] */
+       { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+       /* MT27500 Family [ConnectX-3 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+       { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
        { 0, }
 };
 
@@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void)
                return -1;
        }
 
+       /* Check if module param for ports type has legal combination */
+       if (port_type_array[0] == false && port_type_array[1] == true) {
+               printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+               port_type_array[0] = true;
+       }
+
        return 0;
 }
 
index 978688c3104685d9ec2054e987ed891b3b4646c4..0785d9b2a265811838133550b72522867b34838e 100644 (file)
 
 static const u8 zero_gid[16];  /* automatically initialized to 0 */
 
+struct mlx4_mgm {
+       __be32                  next_gid_index;
+       __be32                  members_count;
+       u32                     reserved[2];
+       u8                      gid[16];
+       __be32                  qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+       return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+       return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
+
 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
                           struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
                            struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
                              struct mlx4_cmd_mailbox *mailbox)
 {
        u32 in_mod;
 
-       in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+       in_mod = (u32) port << 16 | steer << 1;
        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
-                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_NATIVE);
 }
 
 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
        int err;
 
        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
-                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                *hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  * Add new entry to steering data structure.
  * All promisc QPs should be added as well
  */
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
                              enum mlx4_steer_type steer,
                              unsigned int index, u32 qpn)
 {
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_promisc_qp *dqp = NULL;
        u32 prot;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list
         */
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
                /* don't add already existing qpn */
                if (pqp->qpn == qpn)
                        continue;
-               if (members_count == MLX4_QP_PER_MGM) {
+               if (members_count == dev->caps.num_qp_per_mgm) {
                        /* out of space */
                        err = -ENOMEM;
                        goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
 }
 
 /* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
                                   enum mlx4_steer_type steer,
                                   unsigned int index, u32 qpn)
 {
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */
 
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == dqp->qpn)
+               if (qpn == pqp->qpn)
                        return 0; /* qp is already duplicated */
        }
 
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 
 /* Check whether a qpn is a duplicate on steering entry
  * If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
                                  enum mlx4_steer_type steer,
                                  unsigned int index, u32 qpn)
 {
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *dqp, *tmp_dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        /* if qp is not promisc, it cannot be duplicated */
-       if (!get_promisc_qp(dev, pf_num, steer, qpn))
+       if (!get_promisc_qp(dev, 0, steer, qpn))
                return false;
 
        /* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 }
 
 /* I a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
                                      enum mlx4_steer_type steer,
                                      unsigned int index, u32 tqpn)
 {
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        u32 members_count;
        bool ret = false;
        int i;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0;  i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-               if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+               if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
@@ -332,7 +344,7 @@ out:
        return ret;
 }
 
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
                          enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool found;
        int last_index;
        int err;
-       u8 pf_num;
        struct mlx4_priv *priv = mlx4_priv(dev);
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+       if (get_promisc_qp(dev, 0, steer, qpn)) {
                err = 0;  /* Noting to do, already exists */
                goto out_mutex;
        }
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                }
                if (!found) {
                        /* Need to add the qpn to mgm */
-                       if (members_count == MLX4_QP_PER_MGM) {
+                       if (members_count == dev->caps.num_qp_per_mgm) {
                                /* entry is full */
                                err = -ENOMEM;
                                goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_list;
 
@@ -439,7 +450,7 @@ out_mutex:
        return err;
 }
 
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
                             enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool back_to_list = false;
        int loc, i;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        mutex_lock(&priv->mcg_table.mutex);
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                goto out_list;
        }
        mgm = mailbox->buf;
+       memset(mgm, 0, sizeof *mgm);
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_mailbox;
 
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                }
                index += dev->caps.num_mgms;
 
+               new_entry = 1;
                memset(mgm, 0, sizeof *mgm);
                memcpy(mgm->gid, gid, 16);
        }
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       if (members_count == MLX4_QP_PER_MGM) {
+       if (members_count == dev->caps.num_qp_per_mgm) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
                goto out;
@@ -696,9 +707,9 @@ out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
-                       new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+                       new_steering_entry(dev, port, steer, index, qp->qpn);
                else
-                       existing_steering_entry(dev, 0, port, steer,
+                       existing_steering_entry(dev, port, steer,
                                                index, qp->qpn);
        }
        if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
        /* if this pq is also a promisc qp, it shouldn't be removed */
        if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+           check_duplicate_entry(dev, port, steer, index, qp->qpn))
                goto out;
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        mgm->qp[i - 1]     = 0;
 
        if (prot == MLX4_PROT_ETH)
-               removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+               removed_entry = can_remove_steering_entry(dev, port, steer,
+                                                               index, qp->qpn);
        if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
@@ -828,6 +840,34 @@ out:
        return err;
 }
 
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                         u8 gid[16], u8 attach, u8 block_loopback,
+                         enum mlx4_protocol prot)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err = 0;
+       int qpn;
+
+       if (!mlx4_is_mfunc(dev))
+               return -EBADF;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, gid, 16);
+       qpn = qp->qpn;
+       qpn |= (prot << 28);
+       if (attach && block_loopback)
+               qpn |= (1 << 31);
+
+       err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+                      MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
 
-       return mlx4_qp_attach_common(dev, qp, gid,
-                                    block_mcast_loopback, prot,
-                                    steer);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                        !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       if (prot == MLX4_PROT_ETH) {
+       if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
-       }
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
 
        return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+                       struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                              u8 gid[16], enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+       return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+       u8 port = vhcr->in_param >> 62;
+       enum mlx4_steer_type steer = vhcr->in_modifier;
+
+       /* Promiscuous unicast is not allowed in mfunc */
+       if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+               return 0;
+
+       if (vhcr->op_modifier)
+               return add_promisc_qp(dev, port, steer, qpn);
+       else
+               return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+                       enum mlx4_steer_type steer, u8 add, u8 port)
+{
+       return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+                       MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
+}
 
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
 
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
 
-       return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
 
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
 
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+       return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
 
index 5dfa68ffc11c4ea9bacb8b8bdfff94299fd630c1..a80121a2b5195cf245bd16842f3755c822646781 100644 (file)
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
 
 #define DRV_NAME       "mlx4_core"
-#define DRV_VERSION    "1.0"
-#define DRV_RELDATE    "July 14, 2011"
+#define PFX            DRV_NAME ": "
+#define DRV_VERSION    "1.1"
+#define DRV_RELDATE    "Dec, 2011"
 
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
-       MLX4_CLR_INT_SIZE       = 0x00008
+       MLX4_CLR_INT_SIZE       = 0x00008,
+       MLX4_SLAVE_COMM_BASE    = 0x0,
+       MLX4_COMM_PAGESIZE      = 0x1000
 };
 
 enum {
-       MLX4_MGM_ENTRY_SIZE     =  0x100,
-       MLX4_QP_PER_MGM         = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
-       MLX4_MTT_ENTRY_PER_SEG  = 8
+       MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+       MLX4_MAX_QP_PER_MGM     = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+       MLX4_MTT_ENTRY_PER_SEG  = 8,
 };
 
 enum {
@@ -80,6 +84,94 @@ enum {
        MLX4_NUM_CMPTS          = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
+enum mlx4_mr_state {
+       MLX4_MR_DISABLED = 0,
+       MLX4_MR_EN_HW,
+       MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME         10000
+enum {
+       MLX4_COMM_CMD_RESET,
+       MLX4_COMM_CMD_VHCR0,
+       MLX4_COMM_CMD_VHCR1,
+       MLX4_COMM_CMD_VHCR2,
+       MLX4_COMM_CMD_VHCR_EN,
+       MLX4_COMM_CMD_VHCR_POST,
+       MLX4_COMM_CMD_FLR = 254
+};
+
+/*The flag indicates that the slave should delay the RESET cmd*/
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/*indicates how many retries will be done if we are in the middle of FLR*/
+#define NUM_OF_RESET_RETRIES   10
+#define SLEEP_TIME_IN_RESET    (2 * 1000)
+enum mlx4_resource {
+       RES_QP,
+       RES_CQ,
+       RES_SRQ,
+       RES_XRCD,
+       RES_MPT,
+       RES_MTT,
+       RES_MAC,
+       RES_VLAN,
+       RES_EQ,
+       RES_COUNTER,
+       MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+       RES_OP_RESERVE,
+       RES_OP_RESERVE_AND_MAP,
+       RES_OP_MAP_ICM,
+};
+
+
+/*
+ *Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianess
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW to go through communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by command interface
+ */
+struct mlx4_vhcr {
+       u64     in_param;
+       u64     out_param;
+       u32     in_modifier;
+       u32     errno;
+       u16     op;
+       u16     token;
+       u8      op_modifier;
+       u8      e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+       __be64 in_param;
+       __be32 in_modifier;
+       __be64 out_param;
+       __be16 token;
+       u16 reserved;
+       u8 status;
+       u8 flags;
+       __be16 opcode;
+};
+
+struct mlx4_cmd_info {
+       u16 opcode;
+       bool has_inbox;
+       bool has_outbox;
+       bool out_is_imm;
+       bool encode_slave_id;
+       int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                     struct mlx4_cmd_mailbox *inbox);
+       int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                      struct mlx4_cmd_mailbox *inbox,
+                      struct mlx4_cmd_mailbox *outbox,
+                      struct mlx4_cmd_info *cmd);
+};
+
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
 #else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do {                                                                        \
 #define mlx4_warn(mdev, format, arg...) \
        dev_warn(&mdev->pdev->dev, format, ##arg)
 
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
 struct mlx4_bitmap {
        u32                     last;
        u32                     top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
        struct mlx4_icm       **icm;
 };
 
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+       __be32 flags;
+       __be32 qpn;
+       __be32 key;
+       __be32 pd_flags;
+       __be64 start;
+       __be64 length;
+       __be32 lkey;
+       __be32 win_cnt;
+       u8      reserved1[3];
+       u8      mtt_rep;
+       __be64 mtt_addr;
+       __be32 mtt_sz;
+       __be32 entity_size;
+       __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       u8                      log_eq_size;
+       u8                      reserved2[4];
+       u8                      eq_period;
+       u8                      reserved3;
+       u8                      eq_max_count;
+       u8                      reserved4[3];
+       u8                      intr;
+       u8                      log_page_size;
+       u8                      reserved5[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       u32                     reserved6[2];
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved7[4];
+};
+
+struct mlx4_cq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       __be32                  logsize_usrpage;
+       __be16                  cq_period;
+       __be16                  cq_max_count;
+       u8                      reserved2[3];
+       u8                      comp_eqn;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  last_notified_index;
+       __be32                  solicit_producer_index;
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved4[2];
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_srq_context {
+       __be32                  state_logsize_srqn;
+       u8                      logstride;
+       u8                      reserved1;
+       __be16                  xrcd;
+       __be32                  pg_offset_cqn;
+       u32                     reserved2;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  pd;
+       __be16                  limit_watermark;
+       __be16                  wqe_cnt;
+       u16                     reserved4;
+       __be16                  wqe_counter;
+       u32                     reserved5;
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_eqe {
+       u8                      reserved1;
+       u8                      type;
+       u8                      reserved2;
+       u8                      subtype;
+       union {
+               u32             raw[6];
+               struct {
+                       __be32  cqn;
+               } __packed comp;
+               struct {
+                       u16     reserved1;
+                       __be16  token;
+                       u32     reserved2;
+                       u8      reserved3[3];
+                       u8      status;
+                       __be64  out_param;
+               } __packed cmd;
+               struct {
+                       __be32  qpn;
+               } __packed qp;
+               struct {
+                       __be32  srqn;
+               } __packed srq;
+               struct {
+                       __be32  cqn;
+                       u32     reserved1;
+                       u8      reserved2[3];
+                       u8      syndrome;
+               } __packed cq_err;
+               struct {
+                       u32     reserved1[2];
+                       __be32  port;
+               } __packed port_change;
+               struct {
+                       #define COMM_CHANNEL_BIT_ARRAY_SIZE     4
+                       u32 reserved;
+                       u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+               } __packed comm_channel_arm;
+               struct {
+                       u8      port;
+                       u8      reserved[3];
+                       __be64  mac;
+               } __packed mac_update;
+               struct {
+                       u8      port;
+               } __packed sw_event;
+               struct {
+                       __be32  slave_id;
+               } __packed flr_event;
+       }                       event;
+       u8                      slave_id;
+       u8                      reserved3[2];
+       u8                      owner;
+} __packed;
+
 struct mlx4_eq {
        struct mlx4_dev        *dev;
        void __iomem           *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
        struct mlx4_mtt         mtt;
 };
 
+struct mlx4_slave_eqe {
+       u8 type;
+       u8 port;
+       u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+       u32 eqn;
+       u16 token;
+       u64 event_type;
+};
+
 struct mlx4_profile {
        int                     num_qp;
        int                     rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
 struct mlx4_fw {
        u64                     clr_int_base;
        u64                     catas_offset;
+       u64                     comm_base;
        struct mlx4_icm        *fw_icm;
        struct mlx4_icm        *aux_icm;
        u32                     catas_size;
        u16                     fw_pages;
        u8                      clr_int_bar;
        u8                      catas_bar;
+       u8                      comm_bar;
 };
 
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
+struct mlx4_comm {
+       u32                     slave_write;
+       u32                     slave_read;
+};
+
+enum {
+       MLX4_MCAST_CONFIG       = 0,
+       MLX4_MCAST_DISABLE      = 1,
+       MLX4_MCAST_ENABLE       = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+       __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+       struct list_head list;
+       u64 addr;
+};
 
 struct mlx4_promisc_qp {
        struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
        struct list_head duplicates;
 };
 
-struct mlx4_mgm {
-       __be32                  next_gid_index;
-       __be32                  members_count;
-       u32                     reserved[2];
-       u8                      gid[16];
-       __be32                  qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+       u8 comm_toggle;
+       u8 last_cmd;
+       u8 init_port_mask;
+       bool active;
+       u8 function;
+       dma_addr_t vhcr_dma;
+       u16 mtu[MLX4_MAX_PORTS + 1];
+       __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+       struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+       struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_event_eq_info event_eq;
+       u16 eq_pi;
+       u16 eq_ci;
+       spinlock_t lock;
+       /*initialized via the kzalloc*/
+       u8 is_slave_going_down;
+       u32 cookie;
+};
+
+struct slave_list {
+       struct mutex mutex;
+       struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+       spinlock_t lock;
+       /* tree for each resources */
+       struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+       /* num_of_slave's lists, one per slave */
+       struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE    128
+struct mlx4_slave_event_eq {
+       u32 eqn;
+       u32 cons;
+       u32 prod;
+       struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+       int proxy_qp0_active;
+       int qp0_active;
+       int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+       struct mlx4_slave_state *slave_state;
+       struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+       int                     init_port_ref[MLX4_MAX_PORTS + 1];
+       u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
+       struct mlx4_resource_tracker res_tracker;
+       struct workqueue_struct *comm_wq;
+       struct work_struct      comm_work;
+       struct work_struct      slave_event_work;
+       struct work_struct      slave_flr_event_work;
+       spinlock_t              slave_state_lock;
+       __be32                  comm_arm_bit_vector[4];
+       struct mlx4_eqe         cmd_eqe;
+       struct mlx4_slave_event_eq slave_eq;
+       struct mutex            gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+       struct mlx4_comm __iomem       *comm;
+       struct mlx4_vhcr_cmd           *vhcr;
+       dma_addr_t                      vhcr_dma;
+
+       struct mlx4_mfunc_master_ctx    master;
 };
+
 struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
        struct mutex            hcr_mutex;
        struct semaphore        poll_sem;
        struct semaphore        event_sem;
+       struct semaphore        slave_sem;
        int                     max_cmds;
        spinlock_t              context_lock;
        int                     free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
        u16                     token_mask;
        u8                      use_events;
        u8                      toggle;
+       u8                      comm_toggle;
 };
 
 struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
        int                     max;
 };
 
+#define SET_PORT_GEN_ALL_VALID         0x7
+#define SET_PORT_PROMISC_SHIFT         31
+#define SET_PORT_MC_PROMISC_SHIFT      30
+
+enum {
+       MCAST_DIRECT_ONLY       = 0,
+       MCAST_DIRECT            = 1,
+       MCAST_DEFAULT           = 2
+};
+
+
+struct mlx4_set_port_general_context {
+       u8 reserved[3];
+       u8 flags;
+       u16 reserved2;
+       __be16 mtu;
+       u8 pptx;
+       u8 pfctx;
+       u16 reserved3;
+       u8 pprx;
+       u8 pfcrx;
+       u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+       __be32 base_qpn;
+       u8 rererved;
+       u8 n_mac;
+       u8 n_vlan;
+       u8 n_prio;
+       u8 reserved2[3];
+       u8 mac_miss;
+       u8 intra_no_vlan;
+       u8 no_vlan;
+       u8 intra_vlan_miss;
+       u8 vlan_miss;
+       u8 reserved3[3];
+       u8 no_vlan_prio;
+       __be32 promisc;
+       __be32 mcast;
+};
+
 struct mlx4_mac_entry {
        u64 mac;
 };
@@ -333,6 +716,7 @@ struct mlx4_priv {
 
        struct mlx4_fw          fw;
        struct mlx4_cmd         cmd;
+       struct mlx4_mfunc       mfunc;
 
        struct mlx4_bitmap      pd_bitmap;
        struct mlx4_bitmap      xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
        struct list_head        bf_list;
        struct mutex            bf_mutex;
        struct io_mapping       *bf_mapping;
+       int                     reserved_mtts;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                           int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                      struct mlx4_profile *request,
                      struct mlx4_dev_cap *dev_cap,
                      struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout);
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions*/
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource resource_type,
+                                   int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
 
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          enum mlx4_protocol prot, enum mlx4_steer_type steer);
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot,
                          enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+                                    int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+                               struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+                                  struct mlx4_vhcr *vhcr,
+                                  struct mlx4_cmd_mailbox *inbox,
+                                  struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+       *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+       *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+       return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+       return (u32)(*arg >> 32);
+}
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+       return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
 #endif /* MLX4_H */
index 8fda331c65dfddd19a69da927444ec41be546da4..f2a8e65f5f88a4df9ff9861bddfbfeae458d6ec6 100644 (file)
@@ -51,8 +51,8 @@
 #include "en_port.h"
 
 #define DRV_NAME       "mlx4_en"
-#define DRV_VERSION    "1.5.4.2"
-#define DRV_RELDATE    "October 2011"
+#define DRV_VERSION    "2.0"
+#define DRV_RELDATE    "Dec 2011"
 
 #define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
@@ -272,6 +272,7 @@ struct mlx4_en_rx_ring {
        u32 prod;
        u32 cons;
        u32 buf_size;
+       u8  fcs_del;
        void *buf;
        void *rx_info;
        unsigned long bytes;
@@ -365,16 +366,6 @@ struct mlx4_en_rss_map {
        enum mlx4_qp_state indir_state;
 };
 
-struct mlx4_en_rss_context {
-       __be32 base_qpn;
-       __be32 default_qpn;
-       u16 reserved;
-       u8 hash_fn;
-       u8 flags;
-       __be32 rss_key[10];
-       __be32 base_qpn_udp;
-};
-
 struct mlx4_en_port_state {
        int link_state;
        int link_speed;
@@ -462,6 +453,7 @@ struct mlx4_en_priv {
        int base_qpn;
 
        struct mlx4_en_rss_map rss_map;
+       u32 ctrl_flags;
        u32 flags;
 #define MLX4_EN_FLAG_PROMISC   0x1
 #define MLX4_EN_FLAG_MC_PROMISC        0x2
@@ -494,9 +486,9 @@ struct mlx4_en_priv {
 enum mlx4_en_wol {
        MLX4_EN_WOL_MAGIC = (1ULL << 61),
        MLX4_EN_WOL_ENABLED = (1ULL << 62),
-       MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
 };
 
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
 void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
index efa3e77355e499eb365b74a0e2fabe8e79f5fa5d..01df5567e16e48398c2a119d2c39de060934e712 100644 (file)
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
 #include "icm.h"
 
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-       __be32 flags;
-       __be32 qpn;
-       __be32 key;
-       __be32 pd_flags;
-       __be64 start;
-       __be64 length;
-       __be32 lkey;
-       __be32 win_cnt;
-       u8      reserved1[3];
-       u8      mtt_rep;
-       __be64 mtt_seg;
-       __be32 mtt_sz;
-       __be32 entity_size;
-       __be32 first_byte_offset;
-} __packed;
-
 #define MLX4_MPT_FLAG_SW_OWNS      (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE         (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO          (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
        kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
+       int seg_order;
+       u32 offset;
 
-       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;
 
-       if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
-                                seg + (1 << order) - 1)) {
-               mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+       offset = seg * (1 << log_mtts_per_seg);
+
+       if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+                                offset + (1 << order) - 1)) {
+               mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }
 
-       return seg;
+       return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, order);
+               err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+                                                      RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_ALLOC_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return __mlx4_alloc_mtt_range(dev, order);
 }
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
        } else
                mtt->page_shift = page_shift;
 
-       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+       for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;
 
-       mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
-       if (mtt->first_seg == -1)
+       mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+       if (mtt->offset == -1)
                return -ENOMEM;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
 {
+       u32 first_seg;
+       int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+       first_seg = offset / (1 << log_mtts_per_seg);
+
+       mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+       mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+                            offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, offset);
+               set_param_h(&in_param, order);
+               err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_FREE_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed to free mtt range at:"
+                                 "%d order:%d\n", offset, order);
+               return;
+       }
+        __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
        if (mtt->order < 0)
                return;
 
-       mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-       mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-                            mtt->first_seg + (1 << mtt->order) - 1);
+       mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
 {
-       return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+       return (u64) mtt->offset * dev->caps.mtt_entry_sz;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
 
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
-       return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-                       MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
+                       0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
-                           !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+                           !mailbox, MLX4_CMD_HW2SW_MPT,
+                           MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
-                 int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                         u32 *base_mridx)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u32 index;
-       int err;
+       u32 mridx;
 
-       index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-       if (index == -1)
+       mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+       if (mridx == -1)
                return -ENOMEM;
 
+       *base_mridx = mridx;
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+                          u64 iova, u64 size, u32 access, int npages,
+                          int page_shift, struct mlx4_mr *mr)
+{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
-       mr->enabled    = 0;
-       mr->key        = hw_index_to_key(index);
+       mr->enabled    = MLX4_MR_DISABLED;
+       mr->key        = hw_index_to_key(mridx);
+
+       return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+                         struct mlx4_cmd_mailbox *mailbox,
+                         int num_entries)
+{
+       return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+                       MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
+}
 
-       err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       u64 out_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return  __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to release mr index:%d\n",
+                                 index);
+               return;
+       }
+       __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, index);
+               return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+                                                       MLX4_CMD_ALLOC_RES,
+                                                       MLX4_CMD_TIME_CLASS_A,
+                                                       MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+                                 index);
+               return;
+       }
+       return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+                 int npages, int page_shift, struct mlx4_mr *mr)
+{
+       u32 index;
+       int err;
+
+       index = mlx4_mr_reserve(dev);
+       if (index == -1)
+               return -ENOMEM;
+
+       err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+                                    access, npages, page_shift, mr);
        if (err)
-               mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+               mlx4_mr_release(dev, index);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
 
-       if (mr->enabled) {
+       if (mr->enabled == MLX4_MR_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-       }
+                       mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
 
+               mr->enabled = MLX4_MR_EN_SW;
+       }
        mlx4_mtt_cleanup(dev, &mr->mtt);
-       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+       mlx4_mr_free_reserved(dev, mr);
+       if (mr->enabled)
+               mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+       mlx4_mr_release(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;
 
-       err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
        if (err)
                return err;
 
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
-               mpt_entry->mtt_seg = 0;
+               mpt_entry->mtt_addr = 0;
        } else {
-               mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+               mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+                                                 &mr->mtt));
        }
 
        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
-               mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                  dev->caps.mtts_per_seg);
+               mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
-
-       mr->enabled = 1;
+       mr->enabled = MLX4_MR_EN_HW;
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -373,7 +546,7 @@ err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-       mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
-       int s = start_index * sizeof (u64);
 
-       /* All MTTs must fit in the same page */
-       if (start_index / (PAGE_SIZE / sizeof (u64)) !=
-           (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
-               return -EINVAL;
-
-       if (start_index & (dev->caps.mtts_per_seg - 1))
-               return -EINVAL;
+       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+                              start_index, &dma_handle);
 
-       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
-                               s / dev->caps.mtt_entry_sz, &dma_handle);
        if (!mtts)
                return -ENOMEM;
 
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        return 0;
 }
 
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list)
 {
+       int err = 0;
        int chunk;
-       int err;
+       int mtts_per_page;
+       int max_mtts_first_page;
 
-       if (mtt->order < 0)
-               return -EINVAL;
+       /* compute how may mtts fit in the first page */
+       mtts_per_page = PAGE_SIZE / sizeof(u64);
+       max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+                             % mtts_per_page;
+
+       chunk = min_t(int, max_mtts_first_page, npages);
 
        while (npages > 0) {
-               chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
-
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;
+
+               chunk = min_t(int, mtts_per_page, npages);
        }
+       return err;
+}
 
-       return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  int start_index, int npages, u64 *page_list)
+{
+       struct mlx4_cmd_mailbox *mailbox = NULL;
+       __be64 *inbox = NULL;
+       int chunk;
+       int err = 0;
+       int i;
+
+       if (mtt->order < 0)
+               return -EINVAL;
+
+       if (mlx4_is_mfunc(dev)) {
+               mailbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(mailbox))
+                       return PTR_ERR(mailbox);
+               inbox = mailbox->buf;
+
+               while (npages > 0) {
+                       chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+                                     npages);
+                       inbox[0] = cpu_to_be64(mtt->offset + start_index);
+                       inbox[1] = 0;
+                       for (i = 0; i < chunk; ++i)
+                               inbox[i + 2] = cpu_to_be64(page_list[i] |
+                                              MLX4_MTT_FLAG_PRESENT);
+                       err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+                       if (err) {
+                               mlx4_free_cmd_mailbox(dev, mailbox);
+                               return err;
+                       }
+
+                       npages      -= chunk;
+                       start_index += chunk;
+                       page_list   += chunk;
+               }
+               mlx4_free_cmd_mailbox(dev, mailbox);
+               return err;
+       }
+
+       return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;
 
+       if (!is_power_of_2(dev->caps.num_mpts))
+               return -EINVAL;
+
+       /* Nothing to do for slaves - all MR handling is forwarded
+       * to the master */
+       if (mlx4_is_slave(dev))
+               return 0;
+
        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;
 
        err = mlx4_buddy_init(&mr_table->mtt_buddy,
-                             ilog2(dev->caps.num_mtt_segs));
+                             ilog2(dev->caps.num_mtts /
+                             (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;
 
        if (dev->caps.reserved_mtts) {
-               if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+               priv->reserved_mtts =
+                       mlx4_alloc_mtt_range(dev,
+                                            fls(dev->caps.reserved_mtts - 1));
+               if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %d is too small.\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
 
 void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
 
+       if (mlx4_is_slave(dev))
+               return;
+       if (priv->reserved_mtts >= 0)
+               mlx4_free_mtt_range(dev, priv->reserved_mtts,
+                                   fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u64 mtt_seg;
+       u64 mtt_offset;
        int err = -ENOMEM;
 
        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
        if (err)
                return err;
 
-       mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+       mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
 
        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
-                                   fmr->mr.mtt.first_seg,
+                                   fmr->mr.mtt.offset,
                                    &fmr->dma_handle);
+
        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
@@ -619,6 +852,46 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
 
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+                           u32 pd, u32 access, int max_pages,
+                           int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err = -ENOMEM;
+
+       if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+               return -EINVAL;
+
+       /* All MTTs must fit in the same page */
+       if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+               return -EINVAL;
+
+       fmr->page_shift = page_shift;
+       fmr->max_pages  = max_pages;
+       fmr->max_maps   = max_maps;
+       fmr->maps = 0;
+
+       err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+                                    page_shift, &fmr->mr);
+       if (err)
+               return err;
+
+       fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+                                   fmr->mr.mtt.offset,
+                                   &fmr->dma_handle);
+       if (!fmr->mtts) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
+       return 0;
+
+err_free:
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
 {
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
        if (!fmr->maps)
                return;
 
        fmr->maps = 0;
 
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+                      " failed (%d)\n", err);
+               return;
+       }
+
+       err = mlx4_HW2SW_MPT(dev, NULL,
+                            key_to_hw_index(fmr->mr.key) &
+                            (dev->caps.num_mpts - 1));
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err) {
+               printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+                      err);
+               return;
+       }
+       fmr->mr.enabled = MLX4_MR_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
        if (fmr->maps)
                return -EBUSY;
 
-       fmr->mr.enabled = 0;
        mlx4_mr_free(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+       if (fmr->maps)
+               return -EBUSY;
+
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
index 260ed259ce9ba8afebdeabadb66a79126e5c85e3..5c9a54df17aba89d233926d9f37710ac84c0e504 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
        *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
        if (*pdn == -1)
                return -ENOMEM;
-
+       if (mlx4_is_mfunc(dev))
+               *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-                               (1 << 24) - 1, dev->caps.reserved_pds, 0);
+                               (1 << NOT_MASKED_PD_BITS) - 1,
+                                dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
+       int offset;
+
        uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
        if (uar->index == -1)
                return -ENOMEM;
 
-       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+       if (mlx4_is_slave(dev))
+               offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+                                      dev->caps.uar_page_size);
+       else
+               offset = uar->index;
+       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
        uar->map = NULL;
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 
        return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
                                dev->caps.num_uars, dev->caps.num_uars - 1,
-                               max(128, dev->caps.reserved_uars), 0);
+                               dev->caps.reserved_uars, 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
index d942aea4927b1aa1b9ec67ddd703d2227a290fee..88b52e547524efaee71ffc67f00b4f44e7844f30 100644 (file)
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
        table->total = 0;
 }
 
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
-                                  __be64 *entries)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 in_mod;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
-       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-                            u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
        int err;
 
-       if (reserve) {
-               err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-               if (err) {
-                       mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-                       return err;
-               }
-       }
        qp.qpn = *qpn;
 
        mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       err = mlx4_qp_attach_common(dev, &qp, gid, 0,
-                                   MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (err && reserve)
-               mlx4_qp_release_range(dev, *qpn, 1);
+       err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+       if (err)
+               mlx4_warn(dev, "Failed Attaching Unicast\n");
 
        return err;
 }
 
 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-                                 u64 mac, int qpn, u8 free)
+                                 u64 mac, int qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (free)
-               mlx4_qp_release_range(dev, qpn, 1);
+       mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+                         struct mlx4_mac_table *table, int index)
+{
+       int err = 0;
+
+       if (index < 0 || index >= table->max || !table->entries[index]) {
+               mlx4_warn(dev, "No valid Mac entry for the given index\n");
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+                     struct mlx4_mac_table *table, u64 mac)
+{
+       int i;
+
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if ((mac & MLX4_MAC_MASK) ==
+                   (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+                       return i;
+       }
+       /* Mac not found */
+       return -EINVAL;
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
        struct mlx4_mac_entry *entry;
-       int i, err = 0;
-       int free = -1;
+       int index = 0;
+       int err = 0;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
-               if (err)
-                       return err;
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+                       (unsigned long long) mac);
+       index = mlx4_register_mac(dev, port, mac);
+       if (index < 0) {
+               err = index;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
+               return err;
+       }
 
-               entry = kmalloc(sizeof *entry, GFP_KERNEL);
-               if (!entry) {
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return -ENOMEM;
-               }
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+               *qpn = info->base_qpn + index;
+               return 0;
+       }
+
+       err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+       mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+       if (err) {
+               mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+               goto qp_err;
+       }
+
+       err = mlx4_uc_steer_add(dev, port, mac, qpn);
+       if (err)
+               goto steer_err;
 
-               entry->mac = mac;
-               err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-               if (err) {
+       entry = kmalloc(sizeof *entry, GFP_KERNEL);
+       if (!entry) {
+               err = -ENOMEM;
+               goto alloc_err;
+       }
+       entry->mac = mac;
+       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+       if (err)
+               goto insert_err;
+       return 0;
+
+insert_err:
+       kfree(entry);
+
+alloc_err:
+       mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+       mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+       mlx4_unregister_mac(dev, port, mac);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_entry *entry;
+
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+                (unsigned long long) mac);
+       mlx4_unregister_mac(dev, port, mac);
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               entry = radix_tree_lookup(&info->mac_tree, qpn);
+               if (entry) {
+                       mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+                                " qpn %d\n", port,
+                                (unsigned long long) mac, qpn);
+                       mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+                       mlx4_qp_release_range(dev, qpn, 1);
+                       radix_tree_delete(&info->mac_tree, qpn);
                        kfree(entry);
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return err;
                }
        }
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+                                  __be64 *entries)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_mod;
+       int err;
 
-       mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       int i, err = 0;
+       int free = -1;
+
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+                (unsigned long long) mac, port);
 
        mutex_lock(&table->mutex);
-       for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-               if (free < 0 && !table->refs[i]) {
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if (free < 0 && !table->entries[i]) {
                        free = i;
                        continue;
                }
 
                if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-                       /* MAC already registered, increase references count */
-                       ++table->refs[i];
+                       /* MAC already registered, Must not have duplicates */
+                       err = -EEXIST;
                        goto out;
                }
        }
 
-       if (free < 0) {
-               err = -ENOMEM;
-               goto out;
-       }
-
        mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
        if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
        }
 
        /* Register new MAC */
-       table->refs[free] = 1;
        table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
-               table->refs[free] = 0;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
                table->entries[free] = 0;
                goto out;
        }
 
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-               *qpn = info->base_qpn + free;
+       err = free;
        ++table->total;
 out:
        mutex_unlock(&table->mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
 
-static int validate_index(struct mlx4_dev *dev,
-                         struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
-       int err = 0;
+       u64 out_param;
+       int err;
 
-       if (index < 0 || index >= table->max || !table->entries[index]) {
-               mlx4_warn(dev, "No valid Mac entry for the given index\n");
-               err = -EINVAL;
-       }
-       return err;
-}
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
 
-static int find_index(struct mlx4_dev *dev,
-                     struct mlx4_mac_table *table, u64 mac)
-{
-       int i;
-       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
-                       return i;
+               return get_param_l(&out_param);
        }
-       /* Mac not found */
-       return -EINVAL;
+       return __mlx4_register_mac(dev, port, mac);
 }
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
-       struct mlx4_mac_entry *entry;
+       int index;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (entry) {
-                       mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
-                       radix_tree_delete(&info->mac_tree, qpn);
-                       index = find_index(dev, table, entry->mac);
-                       kfree(entry);
-               }
-       }
+       index = find_index(dev, table, mac);
 
        mutex_lock(&table->mutex);
 
        if (validate_index(dev, table, index))
                goto out;
 
-       /* Check whether this address has reference count */
-       if (!(--table->refs[index])) {
-               table->entries[index] = 0;
-               mlx4_set_port_mac_table(dev, port, table->entries);
-               --table->total;
-       }
+       table->entries[index] = 0;
+       mlx4_set_port_mac_table(dev, port, table->entries);
+       --table->total;
 out:
        mutex_unlock(&table->mutex);
 }
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               return;
+       }
+       __mlx4_unregister_mac(dev, port, mac);
+       return;
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
        struct mlx4_mac_entry *entry;
-       int err;
+       int index = qpn - info->base_qpn;
+       int err = 0;
 
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
                entry = radix_tree_lookup(&info->mac_tree, qpn);
                if (!entry)
                        return -EINVAL;
-               index = find_index(dev, table, entry->mac);
-               mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+               mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+               mlx4_unregister_mac(dev, port, entry->mac);
                entry->mac = new_mac;
-               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
-               if (err || index < 0)
-                       return err;
+               mlx4_register_mac(dev, port, new_mac);
+               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+               return err;
        }
 
+       /* CX1 doesn't support multi-functions */
        mutex_lock(&table->mutex);
 
        err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) new_mac);
                table->entries[index] = 0;
        }
 out:
@@ -312,6 +387,7 @@ out:
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
                                    __be32 *entries)
 {
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
        memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
        in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
        err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
 
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+                               int *index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
        int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
                goto out;
        }
 
-       /* Register new MAC */
+       /* Register new VLAN */
        table->refs[free] = 1;
        table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
 
@@ -405,9 +482,27 @@ out:
        mutex_unlock(&table->mutex);
        return err;
 }
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *index = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_register_vlan(dev, port, vlan, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
 
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
 
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 out:
        mutex_unlock(&table->mutex);
 }
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, port);
+               err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                              MLX4_CMD_WRAPPED);
+               if (!err)
+                       mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+                                       index);
+
+               return;
+       }
+       __mlx4_unregister_vlan(dev, port, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
        if (!err)
                *caps = *(__be32 *) (outbuf + 84);
        mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
 
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        return err;
 }
 
/*
 * Master-side handler for a SET_PORT command issued by function "slave".
 * in_mod carries the port number in its low byte and the SET_PORT
 * sub-command selector in the remaining bits; op_mod is non-zero for
 * Ethernet ports.  The master rewrites the slave's mailbox before
 * forwarding the command to firmware as a native command.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* Decode the command: low byte is the port, the rest selects the
	 * SET_PORT sub-command. */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the master-owned base QPN while preserving
			 * the promiscuity bits the slave requested. */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* Mtu is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				/* The old maximum may have belonged to this
				 * slave — recompute it across all slaves. */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* Fold the slave's new capability mask into the aggregate over all
	 * slaves; keep the old value so it can be restored on failure. */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	* MTU or PKEY table size
	*/
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf         = !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* Firmware rejected the change — roll back this slave's
		 * cached IB capability mask. */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+                                   vhcr->op_modifier, inbox);
+}
+
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 
        ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
        err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       int err;
+       u32 in_mod;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->flags = SET_PORT_GEN_ALL_VALID;
+       context->mtu = cpu_to_be16(mtu);
+       context->pptx = (pptx * (!pfctx)) << 7;
+       context->pfctx = pfctx;
+       context->pprx = (pprx * (!pfcrx)) << 7;
+       context->pfcrx = pfcrx;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+                          u8 promisc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_rqp_calc_context *context;
+       int err;
+       u32 in_mod;
+       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+               MCAST_DIRECT : MCAST_DEFAULT;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
+           dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+               return 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->base_qpn = cpu_to_be32(base_qpn);
+       context->n_mac = dev->caps.log_num_macs;
+       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+                                      base_qpn);
+       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+                                    base_qpn);
+       context->intra_no_vlan = 0;
+       context->no_vlan = MLX4_NO_VLAN_IDX;
+       context->intra_vlan_miss = 0;
+       context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
/* CMD wrapper: multicast filter programming by slaves is currently a no-op. */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+                       u64 mac, u64 clear, u8 mode)
+{
+       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
/* CMD wrapper: VLAN filter programming by slaves is currently a no-op. */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}
+
/*
 * Execute DUMP_ETH_STATS for the request encoded in in_mod and place the
 * firmware output in "outbox".  Runs as a native (unwrapped) command.
 * NOTE(review): "slave" is currently unused here — presumably kept for
 * interface symmetry with the other common handlers.
 */
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}
+
/* CMD wrapper: run DUMP_ETH_STATS on behalf of function "slave". */
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}
index b967647d0c762f4422e0375b7a27606ab64b8627..66f91ca7a7c6c277778e58d5b4a59b749ab9becf 100644 (file)
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
        profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
        profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-       profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+       profile[MLX4_RES_MTT].size    = dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MCG].size    = mlx4_get_mgm_entry_size(dev);
 
        profile[MLX4_RES_QP].num      = request->num_qp;
        profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        init_hca->cmpt_base      = profile[i].start;
                        break;
                case MLX4_RES_MTT:
-                       dev->caps.num_mtt_segs   = profile[i].num;
+                       dev->caps.num_mtts       = profile[i].num;
                        priv->mr_table.mtt_base  = profile[i].start;
                        init_hca->mtt_base       = profile[i].start;
                        break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        dev->caps.num_mgms        = profile[i].num >> 1;
                        dev->caps.num_amgms       = profile[i].num >> 1;
                        init_hca->mc_base         = profile[i].start;
-                       init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+                       init_hca->log_mc_entry_sz =
+                                       ilog2(mlx4_get_mgm_entry_size(dev));
                        init_hca->log_mc_table_sz = profile[i].log_num;
                        init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
                        break;
index 15f870cb2590b778fa38a3096e19a9957d8ca29d..6b03ac8b9002bdb530b72969ccb0945f19f49c92 100644 (file)
@@ -35,6 +35,8 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
        spin_unlock(&qp_table->lock);
 
        if (!qp) {
-               mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+               mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
                return;
        }
 
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
                complete(&qp->free);
 }
 
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-                  struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-                  int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+       return qp->qpn >= dev->caps.sqp_start &&
+               qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                    struct mlx4_qp_context *context,
+                    enum mlx4_qp_optpar optpar,
+                    int sqd_event, struct mlx4_qp *qp, int native)
 {
        static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
                [MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                }
        };
 
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
+       u8 port;
 
        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
            !op[cur_state][new_state])
                return -EINVAL;
 
-       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-               return mlx4_cmd(dev, 0, qp->qpn, 2,
-                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+               ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+                       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+               if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+                   cur_state != MLX4_QP_STATE_RST &&
+                   is_qp0(dev, qp)) {
+                       port = (qp->qpn & 1) + 1;
+                       priv->mfunc.master.qp0_state[port].qp0_active = 0;
+               }
+               return ret;
+       }
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }
 
+       port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+               context->pri_path.sched_queue = (context->pri_path.sched_queue &
+                                               0xc3);
+
        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);
 
        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);
 
-       ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+       ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+                      qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
-                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
+
/*
 * Public QP state-transition entry point.  Always takes the wrapped path
 * (native = 0) of __mlx4_qp_modify() so that multi-function routing
 * applies.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                                  int *base)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       int qpn;
 
-       qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-       if (qpn == -1)
+       *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+       if (*base == -1)
                return -ENOMEM;
 
-       *base = qpn;
        return 0;
 }
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cnt);
+               set_param_h(&in_param, align);
+               err = mlx4_cmd_imm(dev, in_param, &out_param,
+                                  RES_QP, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+
+               *base = get_param_l(&out_param);
+               return 0;
+       }
+       return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
 
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       if (base_qpn < dev->caps.sqp_start + 8)
-               return;
 
+       if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+               return;
        mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
 }
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, base_qpn);
+               set_param_h(&in_param, cnt);
+               err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err) {
+                       mlx4_warn(dev, "Failed to release qp range"
+                                 " base:%d cnt:%d\n", base_qpn, cnt);
+               }
+       } else
+                __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;
 
-       if (!qpn)
-               return -EINVAL;
-
-       qp->qpn = qpn;
-
-       err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
        if (err)
                goto err_put_qp;
 
-       err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
        if (err)
                goto err_put_auxc;
 
-       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
        if (err)
                goto err_put_altc;
 
-       err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
        if (err)
                goto err_put_rdmarc;
 
-       spin_lock_irq(&qp_table->lock);
-       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (err)
-               goto err_put_cmpt;
-
-       atomic_set(&qp->refcount, 1);
-       init_completion(&qp->free);
-
        return 0;
 
-err_put_cmpt:
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
 err_put_rdmarc:
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
 
 err_put_altc:
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qpn);
 
 err_put_auxc:
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qpn);
 
 err_put_qp:
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qpn);
 
 err_out:
        return err;
 }
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, qpn);
+               return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+                                   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+                                   MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
/* Release the ICM table references taken by __mlx4_qp_alloc_icm(),
 * in reverse order of acquisition. */
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, qpn);
+               if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+       } else
+               __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       int err;
+
+       if (!qpn)
+               return -EINVAL;
+
+       qp->qpn = qpn;
+
+       err = mlx4_qp_alloc_icm(dev, qpn);
+       if (err)
+               return err;
+
+       spin_lock_irq(&qp_table->lock);
+       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+                               (dev->caps.num_qps - 1), qp);
+       spin_unlock_irq(&qp_table->lock);
+       if (err)
+               goto err_icm;
+
+       atomic_set(&qp->refcount, 1);
+       init_completion(&qp->free);
+
+       return 0;
+
+err_icm:
+       mlx4_qp_free_icm(dev, qpn);
+       return err;
+}
+
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
        wait_for_completion(&qp->free);
 
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 {
        return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
+
        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                return PTR_ERR(mailbox);
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
-                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_WRAPPED);
        if (!err)
                memcpy(context, mailbox->buf + 8, sizeof *context);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644 (file)
index 0000000..ed20751
--- /dev/null
@@ -0,0 +1,3104 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID         (1ull << 63)
+#define MLX4_MAC_MASK          0x7fffffffffffffffULL
+#define ETH_ALEN               6
+
/* A MAC address registered by a slave, tracked so it can be reclaimed. */
struct mac_res {
	struct list_head list;	/* entry in the per-slave resource list */
	u64 mac;		/* the registered MAC address */
	u8 port;		/* port the MAC belongs to */
};
+
/* Tracking state common to every resource type (embedded as "com"). */
struct res_common {
	struct list_head	list;		/* entry in the owner's resource list */
	u32			res_id;		/* resource identifier */
	int			owner;		/* owning slave/function */
	int			state;		/* current state (per-type enum) */
	int			from_state;	/* state before the in-flight transition */
	int			to_state;	/* state requested by the transition */
	int			removing;	/* non-zero once removal has begun */
};
+
+enum {
+       RES_ANY_BUSY = 1
+};
+
/* A multicast GID a QP has been attached to, kept per-QP for cleanup. */
struct res_gid {
	struct list_head	list;		/* entry in res_qp.mcg_list */
	u8			gid[16];	/* the multicast GID */
	enum mlx4_protocol	prot;		/* protocol used for the attach */
};
+
+enum res_qp_states {
+       RES_QP_BUSY = RES_ANY_BUSY,
+
+       /* QP number was allocated */
+       RES_QP_RESERVED,
+
+       /* ICM memory for QP context was mapped */
+       RES_QP_MAPPED,
+
+       /* QP is in hw ownership */
+       RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+       switch (state) {
+       case RES_QP_BUSY: return "RES_QP_BUSY";
+       case RES_QP_RESERVED: return "RES_QP_RESERVED";
+       case RES_QP_MAPPED: return "RES_QP_MAPPED";
+       case RES_QP_HW: return "RES_QP_HW";
+       default: return "Unknown";
+       }
+}
+
/* Tracker record for a QP owned by a slave. */
struct res_qp {
	struct res_common	com;		/* common tracking state */
	struct res_mtt	       *mtt;		/* MTT backing the QP, if any */
	struct res_cq	       *rcq;		/* rcq/scq: CQs the QP references
						 * (presumably recv/send — confirm) */
	struct res_cq	       *scq;
	struct res_srq	       *srq;		/* attached SRQ, if any */
	struct list_head	mcg_list;	/* res_gid entries this QP joined */
	spinlock_t		mcg_spl;	/* protects mcg_list */
	int			local_qpn;	/* QP number */
};
+
+enum res_mtt_states {
+       RES_MTT_BUSY = RES_ANY_BUSY,
+       RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+       switch (state) {
+       case RES_MTT_BUSY: return "RES_MTT_BUSY";
+       case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
/* Tracker record for an MTT range. */
struct res_mtt {
	struct res_common	com;		/* common tracking state */
	int			order;		/* allocation order of the range */
	atomic_t		ref_count;	/* users (QP/CQ/SRQ/MPT) of this range */
};
+
+enum res_mpt_states {
+       RES_MPT_BUSY = RES_ANY_BUSY,
+       RES_MPT_RESERVED,
+       RES_MPT_MAPPED,
+       RES_MPT_HW,
+};
+
/* Tracker record for a memory protection table entry. */
struct res_mpt {
	struct res_common	com;		/* common tracking state */
	struct res_mtt	       *mtt;		/* MTT backing the region, if any */
	int			key;		/* MPT key */
};
+
+enum res_eq_states {
+       RES_EQ_BUSY = RES_ANY_BUSY,
+       RES_EQ_RESERVED,
+       RES_EQ_HW,
+};
+
/* Tracker record for an event queue. */
struct res_eq {
	struct res_common	com;		/* common tracking state */
	struct res_mtt	       *mtt;		/* MTT backing the EQ, if any */
};
+
+enum res_cq_states {
+       RES_CQ_BUSY = RES_ANY_BUSY,
+       RES_CQ_ALLOCATED,
+       RES_CQ_HW,
+};
+
/* Tracker record for a completion queue. */
struct res_cq {
	struct res_common	com;		/* common tracking state */
	struct res_mtt	       *mtt;		/* MTT backing the CQ, if any */
	atomic_t		ref_count;	/* objects still referencing this CQ */
};
+
+enum res_srq_states {
+       RES_SRQ_BUSY = RES_ANY_BUSY,
+       RES_SRQ_ALLOCATED,
+       RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+       switch (state) {
+       case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+       case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+       case RES_SRQ_HW: return "RES_SRQ_HW";
+       default: return "Unknown";
+       }
+}
+
/* Tracker record for a shared receive queue. */
struct res_srq {
	struct res_common	com;		/* common tracking state */
	struct res_mtt	       *mtt;		/* MTT backing the SRQ, if any */
	struct res_cq	       *cq;		/* CQ the SRQ references, if any */
	atomic_t		ref_count;	/* objects still referencing this SRQ */
};
+
+enum res_counter_states {
+       RES_COUNTER_BUSY = RES_ANY_BUSY,
+       RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+       switch (state) {
+       case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+       case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
+struct res_counter {
+       struct res_common       com;
+       int                     port;
+};
+
+/* For Debug uses: human-readable name of a resource type.
+ * Fix: drop the stray semicolon after the switch's closing brace (an
+ * empty statement that trips -Wpedantic / checkpatch).
+ */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+       switch (rt) {
+       case RES_QP: return "RES_QP";
+       case RES_CQ: return "RES_CQ";
+       case RES_SRQ: return "RES_SRQ";
+       case RES_MPT: return "RES_MPT";
+       case RES_MTT: return "RES_MTT";
+       case RES_MAC: return "RES_MAC";
+       case RES_EQ: return "RES_EQ";
+       case RES_COUNTER: return "RES_COUNTER";
+       default: return "Unknown resource type !!!";
+       }
+}
+
+/* Allocate and initialize the master's resource tracker: a per-slave
+ * list head for every resource type, a radix tree per resource type,
+ * and the tracker spinlock.  Returns 0 on success or -ENOMEM.
+ * NOTE(review): the "%ld" format assumes dev->num_slaves is long-sized;
+ * confirm against the mlx4_dev definition.
+ */
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+       int t;
+
+       priv->mfunc.master.res_tracker.slave_list =
+               kzalloc(dev->num_slaves * sizeof(struct slave_list),
+                       GFP_KERNEL);
+       if (!priv->mfunc.master.res_tracker.slave_list)
+               return -ENOMEM;
+
+       for (i = 0 ; i < dev->num_slaves; i++) {
+               for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+                       INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+                                      slave_list[i].res_list[t]);
+               mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+       }
+
+       mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+                dev->num_slaves);
+       /* GFP_ATOMIC: tree nodes may be inserted under the tracker spinlock */
+       for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+               INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+                               GFP_ATOMIC|__GFP_NOWARN);
+
+       spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+       return 0 ;
+}
+
+/* Tear down per-slave resource lists and free the tracker table.
+ * Fix: clear the slave_list pointer after kfree() so a repeated call
+ * (or a later error path) cannot double-free it.
+ */
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       if (priv->mfunc.master.res_tracker.slave_list) {
+               for (i = 0 ; i < dev->num_slaves; i++)
+                       mlx4_delete_all_resources_for_slave(dev, i);
+
+               kfree(priv->mfunc.master.res_tracker.slave_list);
+               priv->mfunc.master.res_tracker.slave_list = NULL;
+       }
+}
+
+/* For UD QPs (transport-service bits of the context flags equal
+ * MLX4_QP_ST_UD), force the slave's GID index to 0x80 | slave.
+ * Fix: terminate the debug message with '\n' instead of a trailing
+ * space so it is not fused with the next log line.
+ */
+static void update_ud_gid(struct mlx4_dev *dev,
+                         struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+       u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+       if (MLX4_QP_ST_UD == ts)
+               qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+       mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
+               slave, qp_ctx->pri_path.mgid_index);
+}
+
+/* Mask for reducing an MPT key to its table index.
+ * Assumes dev->caps.num_mpts is a power of two -- TODO confirm. */
+static int mpt_mask(struct mlx4_dev *dev)
+{
+       return dev->caps.num_mpts - 1;
+}
+
+/* Look up the tracking entry for (type, res_id); returns NULL if absent.
+ * Caller must hold the tracker lock (mlx4_tlock). */
+static void *find_res(struct mlx4_dev *dev, int res_id,
+                     enum mlx4_resource type)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+                                res_id);
+}
+
+/* Look up a resource, verify it is owned by @slave, and mark it busy.
+ * On success the previous state is saved in ->from_state and, if @res
+ * is non-NULL, *res receives the entry.  Release with put_res().
+ * Returns 0, -ENOENT (no such entry), -EBUSY, or -EPERM (wrong owner).
+ * Fix: the lookup-failure path returned -ENONET ("machine is not on
+ * the network"); the intended errno is -ENOENT.
+ */
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+                  enum mlx4_resource type,
+                  void *res)
+{
+       struct res_common *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = find_res(dev, res_id, type);
+       if (!r) {
+               err = -ENOENT;
+               goto exit;
+       }
+
+       if (r->state == RES_ANY_BUSY) {
+               err = -EBUSY;
+               goto exit;
+       }
+
+       if (r->owner != slave) {
+               err = -EPERM;
+               goto exit;
+       }
+
+       r->from_state = r->state;
+       r->state = RES_ANY_BUSY;
+       mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+                ResourceType(type), r->res_id);
+
+       if (res)
+               *((struct res_common **)res) = r;
+
+exit:
+       spin_unlock_irq(mlx4_tlock(dev));
+       return err;
+}
+
+/* Report which slave owns resource @res_id of @type; QP ids are first
+ * masked to their 24-bit qpn.  Returns 0 with *slave set, or -ENOENT.
+ * NOTE(review): this uses spin_lock() while the rest of the file uses
+ * spin_lock_irq() on the same lock -- confirm this is never called with
+ * interrupts enabled alongside an irq-disabled holder.
+ */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource type,
+                                   int res_id, int *slave)
+{
+
+       struct res_common *r;
+       int err = -ENOENT;
+       int id = res_id;
+
+       if (type == RES_QP)
+               id &= 0x7fffff;
+       spin_lock(mlx4_tlock(dev));
+
+       r = find_res(dev, id, type);
+       if (r) {
+               *slave = r->owner;
+               err = 0;
+       }
+       spin_unlock(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Return a resource previously marked busy by get_res() to the state it
+ * had before; a vanished entry is silently ignored. */
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+                   enum mlx4_resource type)
+{
+       struct res_common *res;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       res = find_res(dev, res_id, type);
+       if (res)
+               res->state = res->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Allocate a QP tracking entry, initially RESERVED. */
+static struct res_common *alloc_qp_tr(int id)
+{
+       struct res_qp *qp;
+
+       qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+       if (!qp)
+               return NULL;
+
+       qp->com.res_id = id;
+       qp->com.state = RES_QP_RESERVED;
+       INIT_LIST_HEAD(&qp->mcg_list);
+       spin_lock_init(&qp->mcg_spl);
+
+       return &qp->com;
+}
+
+/* Allocate a tracking entry for an MTT range of 2^order entries. */
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+       struct res_mtt *mtt;
+
+       mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+       if (!mtt)
+               return NULL;
+
+       mtt->com.res_id = id;
+       mtt->com.state = RES_MTT_ALLOCATED;
+       mtt->order = order;
+       atomic_set(&mtt->ref_count, 0);
+
+       return &mtt->com;
+}
+
+/* Allocate an MPT tracking entry, initially RESERVED, remembering its key. */
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+       struct res_mpt *mpt;
+
+       mpt = kzalloc(sizeof(*mpt), GFP_KERNEL);
+       if (!mpt)
+               return NULL;
+
+       mpt->com.res_id = id;
+       mpt->com.state = RES_MPT_RESERVED;
+       mpt->key = key;
+
+       return &mpt->com;
+}
+
+/* Allocate an EQ tracking entry, initially RESERVED. */
+static struct res_common *alloc_eq_tr(int id)
+{
+       struct res_eq *eq;
+
+       eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+       if (!eq)
+               return NULL;
+
+       eq->com.res_id = id;
+       eq->com.state = RES_EQ_RESERVED;
+
+       return &eq->com;
+}
+
+/* Allocate a CQ tracking entry, initially ALLOCATED, with no users. */
+static struct res_common *alloc_cq_tr(int id)
+{
+       struct res_cq *cq;
+
+       cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+       if (!cq)
+               return NULL;
+
+       cq->com.res_id = id;
+       cq->com.state = RES_CQ_ALLOCATED;
+       atomic_set(&cq->ref_count, 0);
+
+       return &cq->com;
+}
+
+/* Allocate an SRQ tracking entry, initially ALLOCATED, with no users. */
+static struct res_common *alloc_srq_tr(int id)
+{
+       struct res_srq *srq;
+
+       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+       if (!srq)
+               return NULL;
+
+       srq->com.res_id = id;
+       srq->com.state = RES_SRQ_ALLOCATED;
+       atomic_set(&srq->ref_count, 0);
+
+       return &srq->com;
+}
+
+/* Allocate a counter tracking entry, initially ALLOCATED. */
+static struct res_common *alloc_counter_tr(int id)
+{
+       struct res_counter *counter;
+
+       counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+       if (!counter)
+               return NULL;
+
+       counter->com.res_id = id;
+       counter->com.state = RES_COUNTER_ALLOCATED;
+
+       return &counter->com;
+}
+
+/* Dispatch to the per-type tracking-entry allocator and stamp the owner.
+ * @extra carries the type-specific argument (MPT key or MTT order).
+ * Returns NULL on allocation failure, unknown type, or the still
+ * unimplemented RES_MAC case. */
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+                                  int extra)
+{
+       struct res_common *ret;
+
+       switch (type) {
+       case RES_QP:
+               ret = alloc_qp_tr(id);
+               break;
+       case RES_MPT:
+               ret = alloc_mpt_tr(id, extra);
+               break;
+       case RES_MTT:
+               ret = alloc_mtt_tr(id, extra);
+               break;
+       case RES_EQ:
+               ret = alloc_eq_tr(id);
+               break;
+       case RES_CQ:
+               ret = alloc_cq_tr(id);
+               break;
+       case RES_SRQ:
+               ret = alloc_srq_tr(id);
+               break;
+       case RES_MAC:
+               /* MACs are tracked on per-slave lists, not via alloc_tr */
+               printk(KERN_ERR "implementation missing\n");
+               return NULL;
+       case RES_COUNTER:
+               ret = alloc_counter_tr(id);
+               break;
+
+       default:
+               return NULL;
+       }
+       if (ret)
+               ret->owner = slave;
+
+       return ret;
+}
+
+/* Insert tracking entries for ids [base, base + count) owned by @slave;
+ * either all entries are installed or none are.
+ * Fix: the undo path deleted radix entries at the bare loop index
+ * instead of base + index (wrong whenever base != 0), used "i >= base"
+ * as its bound, and left the already-installed entries linked on the
+ * slave list before freeing them.
+ */
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct res_common **res_arr;
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct radix_tree_root *root = &tracker->res_tree[type];
+
+       res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+       if (!res_arr)
+               return -ENOMEM;
+
+       for (i = 0; i < count; ++i) {
+               res_arr[i] = alloc_tr(base + i, type, slave, extra);
+               if (!res_arr[i]) {
+                       for (--i; i >= 0; --i)
+                               kfree(res_arr[i]);
+
+                       kfree(res_arr);
+                       return -ENOMEM;
+               }
+       }
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = 0; i < count; ++i) {
+               if (find_res(dev, base + i, type)) {
+                       err = -EEXIST;
+                       goto undo;
+               }
+               err = radix_tree_insert(root, base + i, res_arr[i]);
+               if (err)
+                       goto undo;
+               list_add_tail(&res_arr[i]->list,
+                             &tracker->slave_list[slave].res_list[type]);
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+       kfree(res_arr);
+
+       return 0;
+
+undo:
+       /* entries [0, i) were fully installed; roll them back */
+       for (--i; i >= 0; --i) {
+               list_del(&res_arr[i]->list);
+               radix_tree_delete(root, base + i);
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       for (i = 0; i < count; ++i)
+               kfree(res_arr[i]);
+
+       kfree(res_arr);
+
+       return err;
+}
+
+/* A QP tracking entry may be removed only from the RESERVED state. */
+static int remove_qp_ok(struct res_qp *res)
+{
+       switch (res->com.state) {
+       case RES_QP_BUSY:
+               return -EBUSY;
+       case RES_QP_RESERVED:
+               return 0;
+       default:
+               return -EPERM;
+       }
+}
+
+/* An MTT range may be removed only when ALLOCATED, unreferenced, and of
+ * the order the caller claims; -EBUSY / -EPERM / -EINVAL otherwise. */
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+       if (res->com.state == RES_MTT_BUSY ||
+           atomic_read(&res->ref_count)) {
+               printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+                      __func__, __LINE__,
+                      mtt_states_str(res->com.state),
+                      atomic_read(&res->ref_count));
+               return -EBUSY;
+       } else if (res->com.state != RES_MTT_ALLOCATED)
+               return -EPERM;
+       else if (res->order != order)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* An MPT entry may be removed only from the RESERVED state. */
+static int remove_mpt_ok(struct res_mpt *res)
+{
+       switch (res->com.state) {
+       case RES_MPT_BUSY:
+               return -EBUSY;
+       case RES_MPT_RESERVED:
+               return 0;
+       default:
+               return -EPERM;
+       }
+}
+
+/* An EQ entry may be removed only from the RESERVED state.
+ * Fix: the original tested the RES_MPT_* constants (copy/paste from
+ * remove_mpt_ok); use the EQ state space.
+ */
+static int remove_eq_ok(struct res_eq *res)
+{
+       if (res->com.state == RES_EQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_EQ_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+/* A counter may be removed only from the ALLOCATED state. */
+static int remove_counter_ok(struct res_counter *res)
+{
+       switch (res->com.state) {
+       case RES_COUNTER_BUSY:
+               return -EBUSY;
+       case RES_COUNTER_ALLOCATED:
+               return 0;
+       default:
+               return -EPERM;
+       }
+}
+
+/* A CQ may be removed only from the ALLOCATED state. */
+static int remove_cq_ok(struct res_cq *res)
+{
+       switch (res->com.state) {
+       case RES_CQ_BUSY:
+               return -EBUSY;
+       case RES_CQ_ALLOCATED:
+               return 0;
+       default:
+               return -EPERM;
+       }
+}
+
+/* An SRQ may be removed only from the ALLOCATED state. */
+static int remove_srq_ok(struct res_srq *res)
+{
+       switch (res->com.state) {
+       case RES_SRQ_BUSY:
+               return -EBUSY;
+       case RES_SRQ_ALLOCATED:
+               return 0;
+       default:
+               return -EPERM;
+       }
+}
+
+/* Dispatch the per-type "may this entry be removed?" check.
+ * @extra is type-specific (MTT order); RES_MAC is not handled here. */
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+       switch (type) {
+       case RES_QP:
+               return remove_qp_ok((struct res_qp *)res);
+       case RES_CQ:
+               return remove_cq_ok((struct res_cq *)res);
+       case RES_SRQ:
+               return remove_srq_ok((struct res_srq *)res);
+       case RES_MPT:
+               return remove_mpt_ok((struct res_mpt *)res);
+       case RES_MTT:
+               return remove_mtt_ok((struct res_mtt *)res, extra);
+       case RES_MAC:
+               return -ENOSYS;
+       case RES_EQ:
+               return remove_eq_ok((struct res_eq *)res);
+       case RES_COUNTER:
+               return remove_counter_ok((struct res_counter *)res);
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Remove tracking entries [base, base + count) owned by @slave.
+ * First pass validates every entry (exists, owned by slave, removable);
+ * only if all pass does the second pass delete and free them, so the
+ * operation is all-or-nothing. */
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               if (!r) {
+                       err = -ENOENT;
+                       goto out;
+               }
+               if (r->owner != slave) {
+                       err = -EPERM;
+                       goto out;
+               }
+               err = remove_ok(r, type, extra);
+               if (err)
+                       goto out;
+       }
+
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               radix_tree_delete(&tracker->res_tree[type], i);
+               list_del(&r->list);
+               kfree(r);
+       }
+       err = 0;
+
+out:
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Begin moving a QP tracking entry to @state: validate the transition,
+ * then park the entry in RES_QP_BUSY with from_state/to_state recorded.
+ * The move is completed by res_end_move() or rolled back by
+ * res_abort_move().  @alloc distinguishes the alloc/free direction of
+ * the RESERVED <-> MAPPED transitions.  On success *qp (if non-NULL)
+ * receives the entry. */
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+                               enum res_qp_states state, struct res_qp **qp,
+                               int alloc)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_qp *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_QP_BUSY:
+                       mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+                                __func__, r->com.res_id);
+                       err = -EBUSY;
+                       break;
+
+               case RES_QP_RESERVED:
+                       /* only MAPPED -> RESERVED, and only when freeing */
+                       if (r->com.state == RES_QP_MAPPED && !alloc)
+                               break;
+
+                       mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+                       err = -EINVAL;
+                       break;
+
+               case RES_QP_MAPPED:
+                       /* RESERVED -> MAPPED when allocating, or HW -> MAPPED */
+                       if ((r->com.state == RES_QP_RESERVED && alloc) ||
+                           r->com.state == RES_QP_HW)
+                               break;
+                       else {
+                               mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+                                         r->com.res_id);
+                               err = -EINVAL;
+                       }
+
+                       break;
+
+               case RES_QP_HW:
+                       if (r->com.state != RES_QP_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_QP_BUSY;
+                       if (qp)
+                               *qp = (struct res_qp *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Begin moving an MPT tracking entry to @state: validate the transition
+ * (RESERVED <-> MAPPED <-> HW), then park the entry in RES_MPT_BUSY
+ * with from_state/to_state recorded.  Finish with res_end_move() or
+ * roll back with res_abort_move().  On success *mpt (if non-NULL)
+ * receives the entry. */
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_mpt_states state, struct res_mpt **mpt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mpt *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_MPT_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_MPT_RESERVED:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_MAPPED:
+                       if (r->com.state != RES_MPT_RESERVED &&
+                           r->com.state != RES_MPT_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_HW:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_MPT_BUSY;
+                       if (mpt)
+                               *mpt = (struct res_mpt *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Begin moving an EQ tracking entry to @state (RESERVED <-> HW only):
+ * validate the transition, then park the entry in RES_EQ_BUSY with
+ * from_state/to_state recorded.  Finish with res_end_move() or roll
+ * back with res_abort_move(). */
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_eq_states state, struct res_eq **eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_eq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_EQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_EQ_RESERVED:
+                       if (r->com.state != RES_EQ_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_EQ_HW:
+                       if (r->com.state != RES_EQ_RESERVED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_EQ_BUSY;
+                       if (eq)
+                               *eq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Begin moving a CQ tracking entry to @state (ALLOCATED <-> HW):
+ * validate the transition -- leaving HW additionally requires a zero
+ * ref_count -- then park the entry in RES_CQ_BUSY with
+ * from_state/to_state recorded.  Finish with res_end_move() or roll
+ * back with res_abort_move().  (err is assigned on every switch path,
+ * so the missing initializer is harmless.) */
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+                               enum res_cq_states state, struct res_cq **cq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_cq *r;
+       int err;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_CQ_BUSY:
+                       err = -EBUSY;
+                       break;
+
+               case RES_CQ_ALLOCATED:
+                       if (r->com.state != RES_CQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       else
+                               err = 0;
+                       break;
+
+               case RES_CQ_HW:
+                       if (r->com.state != RES_CQ_ALLOCATED)
+                               err = -EINVAL;
+                       else
+                               err = 0;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_CQ_BUSY;
+                       if (cq)
+                               *cq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Begin moving an SRQ tracking entry to @state (ALLOCATED <-> HW):
+ * validate the transition -- leaving HW additionally requires a zero
+ * ref_count -- then park the entry in RES_SRQ_BUSY with
+ * from_state/to_state recorded.  Finish with res_end_move() or roll
+ * back with res_abort_move().
+ * Fix: the @state parameter was declared as enum res_cq_states
+ * (copy/paste from the CQ helper) although SRQ states are compared;
+ * declare it as enum res_srq_states.
+ */
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                                enum res_srq_states state,
+                                struct res_srq **srq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_srq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_SRQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_SRQ_ALLOCATED:
+                       if (r->com.state != RES_SRQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       break;
+
+               case RES_SRQ_HW:
+                       if (r->com.state != RES_SRQ_ALLOCATED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_SRQ_BUSY;
+                       if (srq)
+                               *srq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* Abort an in-flight *_res_start_move_to(): restore the saved
+ * from_state, provided the entry still exists and @slave owns it. */
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+                          enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *res;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       res = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (res && res->owner == slave)
+               res->state = res->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Commit an in-flight *_res_start_move_to(): install the recorded
+ * to_state, provided the entry still exists and @slave owns it. */
+static void res_end_move(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *res;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       res = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (res && res->owner == slave)
+               res->state = res->to_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* True if @qpn lies in the device's reserved QP range.
+ * NOTE(review): @slave is currently unused -- presumably kept for a
+ * future per-slave check; verify against callers. */
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+       return mlx4_is_qp_reserved(dev, qpn);
+}
+
+/* ALLOC_RES handler for QPs.
+ * RES_OP_RESERVE: reserve a range of <count> QPNs aligned to <align>
+ * and track it; the base QPN is returned in *out_param.
+ * RES_OP_MAP_ICM: move the QP to MAPPED, allocating ICM unless the QPN
+ * is in the reserved range (reserved QPNs get a tracking entry here
+ * instead).
+ * NOTE(review): if qp_res_start_move_to() fails for a reserved qpn,
+ * the res range added just above is not removed -- verify intended. */
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err;
+       int count;
+       int align;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               count = get_param_l(&in_param);
+               align = get_param_h(&in_param);
+               err = __mlx4_qp_reserve_range(dev, count, align, &base);
+               if (err)
+                       return err;
+
+               err = add_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err) {
+                       __mlx4_qp_release_range(dev, base, count);
+                       return err;
+               }
+               set_param_l(out_param, base);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               if (valid_reserved(dev, slave, qpn)) {
+                       err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+                       if (err)
+                               return err;
+               }
+
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+                                          NULL, 1);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn)) {
+                       err = __mlx4_qp_alloc_icm(dev, qpn);
+                       if (err) {
+                               res_abort_move(dev, slave, RES_QP, qpn);
+                               return err;
+                       }
+               }
+
+               res_end_move(dev, slave, RES_QP, qpn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+/* ALLOC_RES handler for MTTs: reserve and map a range of 2^order
+ * entries on behalf of @slave, returning its base in *out_param. */
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int order;
+       int base;
+       int err;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return -EINVAL;
+
+       order = get_param_l(&in_param);
+       base = __mlx4_alloc_mtt_range(dev, order);
+       if (base == -1)
+               return -ENOMEM;
+
+       err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (err)
+               __mlx4_free_mtt_range(dev, base, order);
+       else
+               set_param_l(out_param, base);
+
+       return err;
+}
+
+/* ALLOC_RES handler for MPTs.
+ * RES_OP_RESERVE: reserve an MR index, track it under the masked id,
+ * and return the index in *out_param.
+ * RES_OP_MAP_ICM: move the entry to MAPPED and back it with ICM.
+ * Any other op falls through with the initial -EINVAL (the switch has
+ * no default case on purpose). */
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = __mlx4_mr_reserve(dev);
+               if (index == -1)
+                       break;
+               id = index & mpt_mask(dev);
+
+               err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+               if (err) {
+                       __mlx4_mr_release(dev, index);
+                       break;
+               }
+               set_param_l(out_param, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_MAPPED, &mpt);
+               if (err)
+                       return err;
+
+               err = __mlx4_mr_alloc_icm(dev, mpt->key);
+               if (err) {
+                       res_abort_move(dev, slave, RES_MPT, id);
+                       return err;
+               }
+
+               res_end_move(dev, slave, RES_MPT, id);
+               break;
+       }
+       return err;
+}
+
+/* ALLOC_RES handler for CQs: allocate ICM for a new CQ, track it for
+ * @slave, and return the cqn in *out_param. */
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return -EINVAL;
+
+       err = __mlx4_cq_alloc_icm(dev, &cqn);
+       if (err)
+               return err;
+
+       err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+       if (err) {
+               __mlx4_cq_free_icm(dev, cqn);
+               return err;
+       }
+
+       set_param_l(out_param, cqn);
+       return 0;
+}
+
+/* ALLOC_RES handler for SRQs: allocate ICM for a new SRQ, track it for
+ * @slave, and return the srqn in *out_param. */
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return -EINVAL;
+
+       err = __mlx4_srq_alloc_icm(dev, &srqn);
+       if (err)
+               return err;
+
+       err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+       if (err) {
+               __mlx4_srq_free_icm(dev, srqn);
+               return err;
+       }
+
+       set_param_l(out_param, srqn);
+       return 0;
+}
+
+/* Record that @slave owns @mac on @port by appending an entry to the
+ * slave's RES_MAC list.  Returns 0 or -ENOMEM.
+ * NOTE(review): unlike the radix-tracked types, MAC entries are kept on
+ * the list only; no tracker lock is taken here -- confirm callers hold
+ * the appropriate serialization. */
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct mac_res *res;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+       res->mac = mac;
+       res->port = (u8) port;
+       list_add_tail(&res->list,
+                     &tracker->slave_list[slave].res_list[RES_MAC]);
+       return 0;
+}
+
+/* Drop the first (mac, port) entry from @slave's MAC list, if present. */
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+                              int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *head =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *cur, *next;
+
+       list_for_each_entry_safe(cur, next, head, list) {
+               if (cur->mac != mac || cur->port != (u8) port)
+                       continue;
+               list_del(&cur->list);
+               kfree(cur);
+               break;
+       }
+}
+
+/* Release every MAC @slave still holds: unregister each one from the
+ * device and free its list entry.  Used when tearing a slave down. */
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               list_del(&res->list);
+               __mlx4_unregister_mac(dev, res->port, res->mac);
+               kfree(res);
+       }
+}
+
+/* ALLOC_RES handler for MACs: register @mac on the port passed in the
+ * low dword of *out_param, record it on the slave's MAC list, and
+ * return the assigned index through *out_param. */
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int port;
+       u64 mac;
+       int err;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return -EINVAL;
+
+       port = get_param_l(out_param);
+       mac = in_param;
+
+       /* __mlx4_register_mac() returns the MAC index on success */
+       err = __mlx4_register_mac(dev, port, mac);
+       if (err < 0)
+               return err;
+
+       set_param_l(out_param, err);
+       err = mac_add_to_slave(dev, slave, mac, port);
+       if (err)
+               __mlx4_unregister_mac(dev, port, mac);
+
+       return err;
+}
+
+/* ALLOC_RES handler for VLANs: intentionally a stub -- VLAN tracking is
+ * not implemented yet, so the request always "succeeds". */
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
+/* Command wrapper for ALLOC_RES issued by a slave: dispatch on the
+ * resource type in vhcr->in_modifier to the per-type allocator, passing
+ * the sub-operation (vhcr->op_modifier) and the in/out parameters. */
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_CQ:
+               err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+/* FREE_RES handler for RES_QP.
+ * RES_OP_RESERVE: drop a range of reserved QPNs (base in low dword of
+ * in_param, count in high dword) from the tracker, then release the range.
+ * RES_OP_MAP_ICM: move the QP back to RES_QP_RESERVED and free its ICM;
+ * QPNs in the FW-reserved range (valid_reserved) keep their ICM but are
+ * removed from the tracker entirely.
+ */
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param)
+{
+       int err;
+       int count;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               base = get_param_l(&in_param) & 0x7fffff;
+               count = get_param_h(&in_param);
+               err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err)
+                       break;
+               __mlx4_qp_release_range(dev, base, count);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+                                          NULL, 0);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn))
+                       __mlx4_qp_free_icm(dev, qpn);
+
+               res_end_move(dev, slave, RES_QP, qpn);
+
+               if (valid_reserved(dev, slave, qpn))
+                       err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+/* FREE_RES handler for RES_MTT.  Only RES_OP_RESERVE_AND_MAP is valid.
+ * in_param carries the segment base (low dword) and the log2 order
+ * (high dword); the tracker entry is removed first, and only on success
+ * is the underlying MTT range actually freed.
+ */
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int base;
+       int order;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       base = get_param_l(&in_param);
+       order = get_param_h(&in_param);
+       err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (!err)
+               __mlx4_free_mtt_range(dev, base, order);
+       return err;
+}
+
+/* FREE_RES handler for RES_MPT.
+ * RES_OP_RESERVE: look up the tracked MPT by masked index, remember its
+ * HW key, drop it from the tracker and release the MR index.
+ * RES_OP_MAP_ICM: move the MPT back to RES_MPT_RESERVED and free the ICM
+ * backing its key.
+ *
+ * Cleanup vs. the original: the RES_OP_MAP_ICM arm ended with an
+ * unreachable "break" after "return err" (dead code, now removed) and its
+ * body was indented one level deeper than every other case in this file
+ * (now aligned).  Behavior is unchanged.
+ */
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = get_res(dev, slave, id, RES_MPT, &mpt);
+               if (err)
+                       break;
+               index = mpt->key;       /* release by HW key, not slave index */
+               put_res(dev, slave, id, RES_MPT);
+
+               err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+               if (err)
+                       break;
+               __mlx4_mr_release(dev, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_RESERVED, &mpt);
+               if (err)
+                       return err;
+
+               __mlx4_mr_free_icm(dev, mpt->key);
+               res_end_move(dev, slave, RES_MPT, id);
+               return err;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+/* FREE_RES handler for RES_CQ.  Only RES_OP_RESERVE_AND_MAP is valid:
+ * remove the CQN (low dword of in_param) from the tracker, then free the
+ * CQ's ICM.  Tracker removal failing (e.g. CQ busy) leaves the ICM alone.
+ */
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               cqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_cq_free_icm(dev, cqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+/* FREE_RES handler for RES_SRQ; mirrors cq_free_res(): untrack the SRQN
+ * (low dword of in_param) then free its ICM.
+ */
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               srqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_srq_free_icm(dev, srqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+/* FREE_RES handler for RES_MAC.  The port is in the low dword of
+ * *out_param and the MAC itself is in_param; the tracker entry is removed
+ * and the MAC unregistered from the device unconditionally.
+ */
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       int port;
+       int err = 0;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               port = get_param_l(out_param);
+               mac_del_from_slave(dev, slave, in_param, port);
+               __mlx4_unregister_mac(dev, port, in_param);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+
+}
+
+/* FREE_RES handler for RES_VLAN.  No VLAN tracking yet; accept as no-op
+ * to match vlan_alloc_res().
+ */
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
+/* Master-side wrapper for the FREE_RES command issued by a slave.
+ * Dispatches on vhcr->in_modifier (resource type) to the per-type free
+ * handler; unknown types fall through and return the initial -EINVAL.
+ */
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err = -EINVAL;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param);
+               break;
+
+       case RES_CQ:
+               err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               break;
+       }
+       return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+       /* Bit 9 of the MPT flags distinguishes a physical (no-MTT) MR. */
+       return !!(be32_to_cpu(mpt->flags) & (1 << 9));
+}
+
+/* MTT base address of an MPT entry, low 3 bits masked off (8-byte aligned). */
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+       return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+/* Number of MTT entries referenced by an MPT entry. */
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->mtt_sz);
+}
+
+/* PD number of an MPT entry (low 24 bits of pd_flags). */
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+/* MTT base address from a QP context, low 3 bits masked off. */
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+/* MTT base address from an SRQ context, low 3 bits masked off. */
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+/* Number of MTT pages backing a QP's work queues, decoded from the QP
+ * context.  The SQ always contributes; the RQ contributes only when the
+ * QP actually owns one (i.e. it is not SRQ-attached, RSS or XRC).
+ */
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+       int page_shift = (qpc->log_page_size & 0x3f) + 12;
+       int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+       int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+       int log_sq_stride = qpc->sq_size_stride & 7;
+       int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+       int log_rq_stride = qpc->rq_size_stride & 7;
+       /* any of these flags means the QP has no receive queue of its own */
+       int no_rq = ((be32_to_cpu(qpc->srqn) >> 24) & 1) |
+                   ((be32_to_cpu(qpc->flags) >> 13) & 1) |
+                   ((be32_to_cpu(qpc->local_qpn) >> 23) & 1);
+       int sq_bytes = 1 << (log_sq_size + log_sq_stride + 4);
+       int rq_bytes = no_rq ? 0 : 1 << (log_rq_size + log_rq_stride + 4);
+       int total_mem = sq_bytes + rq_bytes;
+
+       return roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+                                 page_shift);
+}
+
+/* PD number of a QP context (low 24 bits of pd). */
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+/* Recover the owning slave index encoded in a PD number's upper bits;
+ * used to verify a slave only references its own PDs.
+ */
+static int pdn2slave(int pdn)
+{
+       return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
+/* Verify that [start, start + size) lies entirely inside the MTT segment
+ * owned by @mtt (base com.res_id, length 1 << order); -EPERM otherwise.
+ */
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+                          int size, struct res_mtt *mtt)
+{
+       int seg_first = mtt->com.res_id;
+       int seg_end = seg_first + (1 << mtt->order);
+
+       if (start >= seg_first && start + size <= seg_end)
+               return 0;
+       return -EPERM;
+}
+
+/* SW2HW_MPT wrapper: validate a slave's MPT before passing the command to
+ * firmware.  Moves the MPT to RES_MPT_HW, and for non-physical MRs pins
+ * the backing MTT (range-checked, ref-counted).  Also checks the PD in the
+ * inbox really belongs to this slave.  On any failure the state move is
+ * aborted and references are dropped in reverse order via gotos.
+ */
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_mpt *mpt;
+       int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+       int phys;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+       if (err)
+               return err;
+
+       /* physical MRs have no MTT to validate or pin */
+       phys = mr_phys_mpt(inbox->buf);
+       if (!phys) {
+               err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+               if (err)
+                       goto ex_abort;
+
+               err = check_mtt_range(dev, slave, mtt_base,
+                                     mr_get_mtt_size(inbox->buf), mtt);
+               if (err)
+                       goto ex_put;
+
+               mpt->mtt = mtt;
+       }
+
+       if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+               err = -EPERM;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       if (!phys) {
+               atomic_inc(&mtt->ref_count);
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       }
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_put:
+       if (!phys)
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+/* HW2SW_MPT wrapper: move the slave's MPT from HW back to MAPPED state,
+ * run the firmware command, and drop the MTT reference taken at SW2HW
+ * time (if any).  The state move is rolled back on firmware failure.
+ */
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       /* mtt is NULL for physical MRs, which never took a reference */
+       if (mpt->mtt)
+               atomic_dec(&mpt->mtt->ref_count);
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+/* QUERY_MPT wrapper: the MPT may only be queried while owned by hardware
+ * (RES_MPT_HW); otherwise -EBUSY.  The resource is held (get/put) across
+ * the firmware call, not state-moved.
+ */
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = get_res(dev, slave, id, RES_MPT, &mpt);
+       if (err)
+               return err;
+
+       if (mpt->com.from_state != RES_MPT_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+       put_res(dev, slave, id, RES_MPT);
+       return err;
+}
+
+/* Receive CQ number of a QP context (low 24 bits of cqn_recv). */
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+/* Send CQ number of a QP context (low 24 bits of cqn_send). */
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+/* SRQ field of a QP context: bits 0-23 are the SRQN, bit 24 is the
+ * "SRQ in use" flag; callers split the two.
+ */
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
+/* RST2INIT_QP wrapper: validate every resource the QP context references
+ * before letting firmware transition the QP to INIT.  Pins (and on success
+ * ref-counts) the MTT range, the receive CQ, the send CQ (shared with rcq
+ * when rcqn == scqn) and, if bit 24 of the srqn field is set, the SRQ.
+ * The PD must belong to this slave.  Failure unwinds all holds in reverse
+ * acquisition order through the goto chain and aborts the state move.
+ */
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_mtt *mtt;
+       struct res_qp *qp;
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+       int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+       int mtt_size = qp_get_mtt_size(qpc);
+       struct res_cq *rcq;
+       struct res_cq *scq;
+       int rcqn = qp_get_rcqn(qpc);
+       int scqn = qp_get_scqn(qpc);
+       u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+       int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+       struct res_srq *srq;
+       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+       if (err)
+               return err;
+       qp->local_qpn = local_qpn;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+       if (err)
+               goto ex_put_mtt;
+
+       if (scqn != rcqn) {
+               err = get_res(dev, slave, scqn, RES_CQ, &scq);
+               if (err)
+                       goto ex_put_rcq;
+       } else
+               scq = rcq;
+
+       if (use_srq) {
+               err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+               if (err)
+                       goto ex_put_scq;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_srq;
+       /* firmware accepted: record references the QP now holds */
+       atomic_inc(&mtt->ref_count);
+       qp->mtt = mtt;
+       atomic_inc(&rcq->ref_count);
+       qp->rcq = rcq;
+       atomic_inc(&scq->ref_count);
+       qp->scq = scq;
+
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+
+       if (use_srq) {
+               atomic_inc(&srq->ref_count);
+               put_res(dev, slave, srqn, RES_SRQ);
+               qp->srq = srq;
+       }
+       put_res(dev, slave, rcqn, RES_CQ);
+       put_res(dev, slave, mtt_base, RES_MTT);
+       res_end_move(dev, slave, RES_QP, qpn);
+
+       return 0;
+
+ex_put_srq:
+       if (use_srq)
+               put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+       put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+       put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+/* MTT base address from an EQ context, low 3 bits masked off. */
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+       return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+/* Number of pages (hence MTT entries) an EQ needs: EQEs are 32 bytes
+ * (the +5), divided by the context's page size; minimum one page.
+ */
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+       int log_eq_size = eqc->log_eq_size & 0x1f;
+       int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+       if (log_eq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_eq_size + 5 - page_shift);
+}
+
+/* MTT base address from a CQ context, low 3 bits masked off. */
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+       return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+/* Number of pages a CQ needs: CQEs are 32 bytes (the +5), divided by the
+ * context's page size; minimum one page.
+ */
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+       int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+       int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+       if (log_cq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_cq_size + 5 - page_shift);
+}
+
+/* SW2HW_EQ wrapper.  EQs are tracked with a composite id (slave << 8 | eqn)
+ * and, unlike other resources, are added to the tracker here on first use.
+ * The backing MTT range is validated and ref-counted before firmware is
+ * invoked; failure unwinds put -> abort-move -> untrack in order.
+ */
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int eqn = vhcr->in_modifier;
+       int res_id = (slave << 8) | eqn;
+       struct mlx4_eq_context *eqc = inbox->buf;
+       int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+       int mtt_size = eq_get_mtt_size(eqc);
+       struct res_eq *eq;
+       struct res_mtt *mtt;
+
+       err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       if (err)
+               return err;
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+       if (err)
+               goto out_add;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto out_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+
+       atomic_inc(&mtt->ref_count);
+       eq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       return err;
+}
+
+/* Find the slave's MTT segment that fully contains [start, start + len)
+ * and claim it: under the tracker lock the segment is marked RES_MTT_BUSY
+ * (its previous state saved in from_state) and returned through *res.
+ * The caller must release it with put_res().  -EINVAL if no segment
+ * contains the range.
+ */
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+                             int len, struct res_mtt **res)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mtt *mtt;
+       int err = -EINVAL;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+                           com.list) {
+               if (!check_mtt_range(dev, slave, start, len, mtt)) {
+                       *res = mtt;
+                       mtt->com.from_state = mtt->com.state;
+                       mtt->com.state = RES_MTT_BUSY;
+                       err = 0;
+                       break;
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+/* WRITE_MTT wrapper.  Claims the MTT segment containing the target range,
+ * then performs the write in software via __mlx4_write_mtt() rather than
+ * forwarding the command: the inbox layout is start address at index 0 and
+ * page addresses from index 2, which are converted in place from big
+ * endian to host order with the present bit (bit 0) cleared.
+ */
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_mtt mtt;
+       __be64 *page_list = inbox->buf;
+       u64 *pg_list = (u64 *)page_list;
+       int i;
+       struct res_mtt *rmtt = NULL;
+       int start = be64_to_cpu(page_list[0]);
+       int npages = vhcr->in_modifier;
+       int err;
+
+       err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+       if (err)
+               return err;
+
+       /* Call the SW implementation of write_mtt:
+        * - Prepare a dummy mtt struct
+        * - Translate inbox contents to simple addresses in host endianess */
+       mtt.offset = 0;  /* TBD this is broken but I don't handle it since
+                           we don't really use it */
+       mtt.order = 0;
+       mtt.page_shift = 0;
+       for (i = 0; i < npages; ++i)
+               pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+       err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+                              ((u64 *)page_list + 2));
+
+       if (rmtt)
+               put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+       return err;
+}
+
+/* HW2SW_EQ wrapper: move the EQ (composite id slave << 8 | eqn) back to
+ * RESERVED, run the firmware command, drop the MTT reference taken at
+ * SW2HW time and remove the EQ from the tracker (EQs were added in
+ * mlx4_SW2HW_EQ_wrapper, so removal belongs here).
+ */
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+       if (err)
+               return err;
+
+       /* hold the MTT (no output pointer needed) while firmware runs */
+       err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+       if (err)
+               goto ex_abort;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       atomic_dec(&eq->mtt->ref_count);
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+       return 0;
+
+ex_put:
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+
+       return err;
+}
+
+/* Inject an event (EQE) into @slave's event queue on its behalf.
+ * Skipped silently (returns 0) unless the slave registered for this event
+ * type.  Serialized per-slave by gen_eqe_mutex; the slave's EQ must be in
+ * RES_EQ_HW state.  For command-completion events a fresh token is stamped
+ * into the EQE before it is copied into a mailbox and posted with the
+ * GEN_EQE firmware command.
+ */
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq;
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_modifier = 0;
+       int err;
+       int res_id;
+       struct res_eq *req;
+
+       if (!priv->mfunc.master.slave_state)
+               return -EINVAL;
+
+       event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+       /* Create the event only if the slave is registered */
+       if ((event_eq->event_type & (1 << eqe->type)) == 0)
+               return 0;
+
+       mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       res_id = (slave << 8) | event_eq->eqn;
+       err = get_res(dev, slave, res_id, RES_EQ, &req);
+       if (err)
+               goto unlock;
+
+       if (req->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto put;
+       }
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto put;
+       }
+
+       if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+               ++event_eq->token;
+               eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+       }
+
+       /* 28 bytes: the EQE payload copied into the mailbox for firmware */
+       memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+       in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+       err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+                      MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       put_res(dev, slave, res_id, RES_EQ);
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+
+put:
+       put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       return err;
+}
+
+/* QUERY_EQ wrapper: the EQ (composite id slave << 8 | eqn) must be in
+ * RES_EQ_HW state; held (get/put) across the firmware call.
+ */
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = get_res(dev, slave, res_id, RES_EQ, &eq);
+       if (err)
+               return err;
+
+       if (eq->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+       put_res(dev, slave, res_id, RES_EQ);
+       return err;
+}
+
+/* SW2HW_CQ wrapper: move the CQ to RES_CQ_HW, validate and pin the MTT
+ * range named in the CQ context, forward to firmware, then ref-count the
+ * MTT against the CQ.  Failure unwinds put -> abort-move.
+ */
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+       struct res_cq *cq;
+       struct res_mtt *mtt;
+
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto out_put;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+/* HW2SW_CQ wrapper: move the CQ back to ALLOCATED, run the firmware
+ * command and drop the MTT reference taken at SW2HW time; the state move
+ * is rolled back on firmware failure.
+ */
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_move;
+       atomic_dec(&cq->mtt->ref_count);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+/* QUERY_CQ wrapper: the CQ is only queried while in RES_CQ_HW state.
+ * NOTE(review): when the CQ is not in HW state this returns the value
+ * left in err by get_res() (0) without running the command, unlike
+ * mlx4_QUERY_MPT_wrapper which reports -EBUSY — confirm whether that is
+ * intentional.
+ */
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
+/* Handle the resize variant of MODIFY_CQ (op_modifier == 0): swap the
+ * CQ's backing MTT for the new range named in the inbox context.  Both
+ * the current MTT (sanity-checked to still match cq->mtt) and the new one
+ * are held across the firmware call; on success the old reference is
+ * dropped and the new one taken.  Failure unwinds holds in reverse order.
+ */
+static int handle_resize(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd,
+                        struct res_cq *cq)
+{
+       int err;
+       struct res_mtt *orig_mtt;
+       struct res_mtt *mtt;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+       err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+       if (err)
+               return err;
+
+       if (orig_mtt != cq->mtt) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_put;
+
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto ex_put1;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put1;
+       atomic_dec(&orig_mtt->ref_count);
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       return 0;
+
+ex_put1:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+       return err;
+
+}
+
+/* MODIFY_CQ wrapper: the CQ must be in RES_CQ_HW state.  op_modifier == 0
+ * means resize, which is handled by handle_resize() (that path issues the
+ * firmware command itself); other modifiers go straight to firmware.
+ */
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       if (vhcr->op_modifier == 0) {
+               err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+               if (err)
+                       goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
+/* PD number of an SRQ context (low 24 bits of pd). */
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+/* Number of pages an SRQ needs: 2^(log size + log stride + 4) bytes
+ * divided by the context's page size; minimum one page.
+ */
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+       int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+       int log_rq_stride = srqc->logstride & 7;
+       int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+       if (log_srq_size + log_rq_stride + 4 < page_shift)
+               return 1;
+
+       return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+/* SW2HW_SRQ wrapper: the SRQN in the context must match the command's
+ * in_modifier; move the SRQ to RES_SRQ_HW, validate and pin its MTT range,
+ * verify the PD belongs to this slave, forward to firmware and ref-count
+ * the MTT.  Failure unwinds put -> abort-move.
+ */
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_srq *srq;
+       struct mlx4_srq_context *srqc = inbox->buf;
+       int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+       if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+               return -EINVAL;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+       err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+                             mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_mtt;
+
+       atomic_inc(&mtt->ref_count);
+       srq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+       return 0;
+
+ex_put_mtt:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+/* Command wrapper: HW2SW_SRQ issued by a slave. Moves the SRQ back to
+ * ALLOCATED state and drops the references it held on its MTT (and CQ,
+ * if any); aborts the state move if the firmware command fails. */
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+       atomic_dec(&srq->mtt->ref_count);
+       if (srq->cq)
+               atomic_dec(&srq->cq->ref_count);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+/* Command wrapper: QUERY_SRQ issued by a slave. Only allowed while the
+ * SRQ is in HW state; otherwise returns -EBUSY. */
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+/* Command wrapper: ARM_SRQ issued by a slave. Same precondition as
+ * QUERY_SRQ — the SRQ must currently be in HW state. */
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+/* Generic QP command wrapper: validates that the (24-bit) QP is owned by
+ * the slave and currently in HW state, then forwards the command. */
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = get_res(dev, slave, qpn, RES_QP, &qp);
+       if (err)
+               return err;
+       if (qp->com.from_state != RES_QP_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
+/* Command wrapper: INIT2RTR_QP. Patches the UD GID in the QP context
+ * (inbox payload starts 8 bytes in) for this slave before delegating to
+ * the generic QP wrapper. */
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+       update_ud_gid(dev, qpc, (u8)slave);
+
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+/* Command wrapper: move a QP to RESET. Transitions the tracked QP back to
+ * MAPPED state and, on success, releases the references it held on its
+ * MTT, receive/send CQs and optional SRQ. */
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       atomic_dec(&qp->mtt->ref_count);
+       atomic_dec(&qp->rcq->ref_count);
+       atomic_dec(&qp->scq->ref_count);
+       if (qp->srq)
+               atomic_dec(&qp->srq->ref_count);
+       res_end_move(dev, slave, RES_QP, qpn);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+/* Find the multicast-group entry for a 16-byte GID in the QP's mcg_list.
+ * Caller must hold rqp->mcg_spl. Returns NULL if not attached. */
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+                               struct res_qp *rqp, u8 *gid)
+{
+       struct res_gid *res;
+
+       list_for_each_entry(res, &rqp->mcg_list, list) {
+               if (!memcmp(res->gid, gid, 16))
+                       return res;
+       }
+       return NULL;
+}
+
+/* Record a multicast-group attachment (gid, prot) on the tracked QP.
+ * Returns -EEXIST if the GID is already attached, -ENOMEM on allocation
+ * failure. The allocation happens before taking the spinlock so the
+ * critical section stays sleep-free. */
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       if (find_gid(dev, slave, rqp, gid)) {
+               kfree(res);
+               err = -EEXIST;
+       } else {
+               memcpy(res->gid, gid, 16);
+               res->prot = prot;
+               list_add_tail(&res->list, &rqp->mcg_list);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
+/* Remove a recorded multicast-group attachment from the tracked QP.
+ * Fails with -EINVAL if the GID is absent or was attached with a
+ * different protocol. */
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       res = find_gid(dev, slave, rqp, gid);
+       if (!res || res->prot != prot)
+               err = -EINVAL;
+       else {
+               list_del(&res->list);
+               kfree(res);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
+/* Command wrapper: attach (op_modifier != 0) or detach a slave's QP
+ * to/from a multicast group. Bookkeeping in the resource tracker is kept
+ * in sync with the actual attach/detach so the group can be cleaned up
+ * when the slave dies (see detach_qp()). */
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+       u8 *gid = inbox->buf;
+       enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+       int err, err1;
+       int qpn;
+       struct res_qp *rqp;
+       int attach = vhcr->op_modifier;
+       int block_loopback = vhcr->in_modifier >> 31;
+       /* NOTE(review): steer type is derived from bit 1 of gid[7]; confirm
+        * this masks the intended mlx4_steer_type values. */
+       u8 steer_type_mask = 2;
+       enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
+       qpn = vhcr->in_modifier & 0xffffff;
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err)
+               return err;
+
+       qp.qpn = qpn;
+       if (attach) {
+               err = add_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+
+               err = mlx4_qp_attach_common(dev, &qp, gid,
+                                           block_loopback, prot, type);
+               if (err)
+                       goto ex_rem;
+       } else {
+               err = rem_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+               err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+       }
+
+       put_res(dev, slave, qpn, RES_QP);
+       return 0;
+
+ex_rem:
+       /* ignore error return below, already in error */
+       err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+       put_res(dev, slave, qpn, RES_QP);
+
+       return err;
+}
+
+/* Retry budget for busy-resource loops (not referenced in this hunk;
+ * presumably used by callers elsewhere in the file). */
+enum {
+       BUSY_MAX_RETRIES = 10
+};
+
+/* Command wrapper: QUERY_IF_STAT. Verifies the counter index is owned by
+ * the slave (NULL result pointer — only the ownership check is wanted)
+ * before passing the query through to firmware. */
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier & 0xffff;
+
+       err = get_res(dev, slave, index, RES_COUNTER, NULL);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       put_res(dev, slave, index, RES_COUNTER);
+       return err;
+}
+
+/* Detach a dying slave's QP from every multicast group it was attached to
+ * and free the tracking entries. Best effort: the detach result is
+ * deliberately not acted upon.
+ * NOTE(review): err is assigned but never read — either log the failure
+ * or drop the variable. */
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+       struct res_gid *rgid;
+       struct res_gid *tmp;
+       int err;
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+       list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+               qp.qpn = rqp->local_qpn;
+               err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+                                           MLX4_MC_STEER);
+               list_del(&rgid->list);
+               kfree(rgid);
+       }
+}
+
+/* One pass over a slave's resource list of the given type: mark every
+ * idle resource BUSY + removing, and count the ones already busy (held by
+ * an in-flight command). Returns the busy count; 0 means all resources
+ * are now claimed for removal. 'print' enables per-resource debug output
+ * on the final pass. */
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+                         enum mlx4_resource type, int print)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+       struct res_common *r;
+       struct res_common *tmp;
+       int busy;
+
+       busy = 0;
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(r, tmp, rlist, list) {
+               if (r->owner == slave) {
+                       if (!r->removing) {
+                               if (r->state == RES_ANY_BUSY) {
+                                       if (print)
+                                               mlx4_dbg(dev,
+                                                        "%s id 0x%x is busy\n",
+                                                         ResourceType(type),
+                                                         r->res_id);
+                                       ++busy;
+                               } else {
+                                       r->from_state = r->state;
+                                       r->state = RES_ANY_BUSY;
+                                       r->removing = 1;
+                               }
+                       }
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return busy;
+}
+
+/* Repeatedly try to claim all of a slave's resources of the given type,
+ * yielding between attempts, for up to 5 seconds. If some are still busy
+ * after the deadline, run one final pass with debug printing enabled and
+ * return the remaining busy count. */
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type)
+{
+       unsigned long begin;
+       int busy;
+
+       begin = jiffies;
+       do {
+               busy = _move_all_busy(dev, slave, type, 0);
+               if (time_after(jiffies, begin + 5 * HZ))
+                       break;
+               if (busy)
+                       cond_resched();
+       } while (busy);
+
+       if (busy)
+               busy = _move_all_busy(dev, slave, type, 1);
+
+       return busy;
+}
+/* Tear down every QP owned by a (dead) slave. Each QP is walked down its
+ * state ladder (HW -> MAPPED -> RESERVED -> freed), issuing 2RST_QP for
+ * HW-state QPs and releasing ICM/reservations along the way. The tracker
+ * lock is dropped around the per-QP work, which may sleep.
+ * NOTE(review): the warn format concatenates "busy""for" with no space
+ * between the two string literals. */
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *qp_list =
+               &tracker->slave_list[slave].res_list[RES_QP];
+       struct res_qp *qp;
+       struct res_qp *tmp;
+       int state;
+       u64 in_param;
+       int qpn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_QP);
+       if (err)
+               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
+                         "for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (qp->com.owner == slave) {
+                       qpn = qp->com.res_id;
+                       detach_qp(dev, slave, qp);
+                       state = qp->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_QP_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_QP],
+                                                         qp->com.res_id);
+                                       list_del(&qp->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(qp);
+                                       state = 0;
+                                       break;
+                               case RES_QP_MAPPED:
+                                       if (!valid_reserved(dev, slave, qpn))
+                                               __mlx4_qp_free_icm(dev, qpn);
+                                       state = RES_QP_RESERVED;
+                                       break;
+                               case RES_QP_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param,
+                                                      qp->local_qpn, 2,
+                                                      MLX4_CMD_2RST_QP,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_qps: failed"
+                                                        " to move slave %d qpn %d to"
+                                                        " reset\n", slave,
+                                                        qp->local_qpn);
+                                       atomic_dec(&qp->rcq->ref_count);
+                                       atomic_dec(&qp->scq->ref_count);
+                                       atomic_dec(&qp->mtt->ref_count);
+                                       if (qp->srq)
+                                               atomic_dec(&qp->srq->ref_count);
+                                       state = RES_QP_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Tear down every SRQ owned by a (dead) slave: HW-state SRQs are moved to
+ * SW ownership via HW2SW_SRQ, then their ICM and tracker entries are
+ * freed. Mirrors the state-ladder pattern of rem_slave_qps(). */
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *srq_list =
+               &tracker->slave_list[slave].res_list[RES_SRQ];
+       struct res_srq *srq;
+       struct res_srq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int srqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_SRQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (srq->com.owner == slave) {
+                       srqn = srq->com.res_id;
+                       state = srq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_SRQ_ALLOCATED:
+                                       __mlx4_srq_free_icm(dev, srqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_SRQ],
+                                                         srqn);
+                                       list_del(&srq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(srq);
+                                       state = 0;
+                                       break;
+
+                               case RES_SRQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, srqn, 1,
+                                                      MLX4_CMD_HW2SW_SRQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_srqs: failed"
+                                                        " to move slave %d srq %d to"
+                                                        " SW ownership\n",
+                                                        slave, srqn);
+
+                                       atomic_dec(&srq->mtt->ref_count);
+                                       if (srq->cq)
+                                               atomic_dec(&srq->cq->ref_count);
+                                       state = RES_SRQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Tear down every CQ owned by a (dead) slave. CQs still referenced by
+ * other resources (ref_count != 0) are skipped; QPs/SRQs must be removed
+ * first so those references have been dropped. */
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *cq_list =
+               &tracker->slave_list[slave].res_list[RES_CQ];
+       struct res_cq *cq;
+       struct res_cq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int cqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_CQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+                       cqn = cq->com.res_id;
+                       state = cq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_CQ_ALLOCATED:
+                                       __mlx4_cq_free_icm(dev, cqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_CQ],
+                                                         cqn);
+                                       list_del(&cq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(cq);
+                                       state = 0;
+                                       break;
+
+                               case RES_CQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, cqn, 1,
+                                                      MLX4_CMD_HW2SW_CQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_cqs: failed"
+                                                        " to move slave %d cq %d to"
+                                                        " SW ownership\n",
+                                                        slave, cqn);
+                                       atomic_dec(&cq->mtt->ref_count);
+                                       state = RES_CQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Tear down every memory region (MPT) owned by a (dead) slave, walking
+ * HW -> MAPPED -> RESERVED: HW2SW_MPT to reclaim ownership, then ICM and
+ * key release. mpt->mtt may be NULL for MRs never bound to an MTT. */
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mpt_list =
+               &tracker->slave_list[slave].res_list[RES_MPT];
+       struct res_mpt *mpt;
+       struct res_mpt *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int mptn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MPT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mpt->com.owner == slave) {
+                       mptn = mpt->com.res_id;
+                       state = mpt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MPT_RESERVED:
+                                       __mlx4_mr_release(dev, mpt->key);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MPT],
+                                                         mptn);
+                                       list_del(&mpt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mpt);
+                                       state = 0;
+                                       break;
+
+                               case RES_MPT_MAPPED:
+                                       __mlx4_mr_free_icm(dev, mpt->key);
+                                       state = RES_MPT_RESERVED;
+                                       break;
+
+                               case RES_MPT_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, mptn, 0,
+                                                    MLX4_CMD_HW2SW_MPT,
+                                                    MLX4_CMD_TIME_CLASS_A,
+                                                    MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_mrs: failed"
+                                                        " to move slave %d mpt %d to"
+                                                        " SW ownership\n",
+                                                        slave, mptn);
+                                       if (mpt->mtt)
+                                               atomic_dec(&mpt->mtt->ref_count);
+                                       state = RES_MPT_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Free every MTT range owned by a (dead) slave. MTTs have a single
+ * ALLOCATED state; callers must run this after the QP/CQ/SRQ/MR cleanup
+ * so the MTT references have already been dropped. */
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *mtt_list =
+               &tracker->slave_list[slave].res_list[RES_MTT];
+       struct res_mtt *mtt;
+       struct res_mtt *tmp;
+       int state;
+       LIST_HEAD(tlist);
+       int base;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MTT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mtt->com.owner == slave) {
+                       base = mtt->com.res_id;
+                       state = mtt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MTT_ALLOCATED:
+                                       __mlx4_free_mtt_range(dev, base,
+                                                             mtt->order);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MTT],
+                                                         base);
+                                       list_del(&mtt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mtt);
+                                       state = 0;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Tear down every EQ owned by a (dead) slave: HW-state EQs are returned
+ * to SW ownership via HW2SW_EQ, then the reservation is dropped.
+ * NOTE(review): the "failed to move" mlx4_dbg fires unconditionally, even
+ * when the command succeeded; and the mailbox-allocation-failure path
+ * retries forever via continue — confirm both are intended. */
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *eq_list =
+               &tracker->slave_list[slave].res_list[RES_EQ];
+       struct res_eq *eq;
+       struct res_eq *tmp;
+       int err;
+       int state;
+       LIST_HEAD(tlist);
+       int eqn;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       err = move_all_busy(dev, slave, RES_EQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (eq->com.owner == slave) {
+                       eqn = eq->com.res_id;
+                       state = eq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_EQ_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_EQ],
+                                                         eqn);
+                                       list_del(&eq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(eq);
+                                       state = 0;
+                                       break;
+
+                               case RES_EQ_HW:
+                                       mailbox = mlx4_alloc_cmd_mailbox(dev);
+                                       if (IS_ERR(mailbox)) {
+                                               cond_resched();
+                                               continue;
+                                       }
+                                       err = mlx4_cmd_box(dev, slave, 0,
+                                                          eqn & 0xff, 0,
+                                                          MLX4_CMD_HW2SW_EQ,
+                                                          MLX4_CMD_TIME_CLASS_A,
+                                                          MLX4_CMD_NATIVE);
+                                       mlx4_dbg(dev, "rem_slave_eqs: failed"
+                                                " to move slave %d eqs %d to"
+                                                " SW ownership\n", slave, eqn);
+                                       mlx4_free_cmd_mailbox(dev, mailbox);
+                                       if (!err) {
+                                               atomic_dec(&eq->mtt->ref_count);
+                                               state = RES_EQ_RESERVED;
+                                       }
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+/* Release everything a slave owns when it goes away. Order matters:
+ * QPs/SRQs/CQs/MRs drop their MTT references before EQs and MTTs are
+ * freed. The per-slave mutex serializes against concurrent commands.
+ * (The VLAN comment marks cleanup not yet implemented here.) */
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+       /*VLAN*/
+       rem_slave_macs(dev, slave);
+       rem_slave_qps(dev, slave);
+       rem_slave_srqs(dev, slave);
+       rem_slave_cqs(dev, slave);
+       rem_slave_mrs(dev, slave);
+       rem_slave_eqs(dev, slave);
+       rem_slave_mtts(dev, slave);
+       mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
index e2337a7411d943409fc881d911b21d2c907bcc18..802498293528307b5c2e123eae089f84929759ea 100644 (file)
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
        int err = 0;
 
        err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
-                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err) {
                mlx4_err(dev, "Sense command failed for port: %d\n", port);
                return err;
index 9cbf3fce0145c3f8d8518e125bbb46295669e9fb..2823fffc6383989c985e54884b826cdf69ca50fc 100644 (file)
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_srq_context {
-       __be32                  state_logsize_srqn;
-       u8                      logstride;
-       u8                      reserved1;
-       __be16                  xrcd;
-       __be32                  pg_offset_cqn;
-       u32                     reserved2;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  pd;
-       __be16                  limit_watermark;
-       __be16                  wqe_cnt;
-       u16                     reserved4;
-       __be16                  wqe_counter;
-       u32                     reserved5;
-       __be64                  db_rec_addr;
-};
-
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+                       MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
                            mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
 {
        return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
-                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_srq_context *srq_context;
-       u64 mtt_addr;
        int err;
 
-       srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-       if (srq->srqn == -1)
+
+       *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+       if (*srqn == -1)
                return -ENOMEM;
 
-       err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->table, *srqn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
        if (err)
                goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+       mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+       return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+                                  RES_OP_RESERVE_AND_MAP,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *srqn = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+       mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+       mlx4_table_put(dev, &srq_table->table, srqn);
+       mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, srqn);
+               if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+                            MLX4_CMD_FREE_RES,
+                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
+               return;
+       }
+       __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_srq_context *srq_context;
+       u64 mtt_addr;
+       int err;
+
+       err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+       if (err)
+               return err;
 
        spin_lock_irq(&srq_table->lock);
        err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
        spin_unlock_irq(&srq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
        radix_tree_delete(&srq_table->tree, srq->srqn);
        spin_unlock_irq(&srq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+       mlx4_srq_free_icm(dev, srq->srqn);
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
                complete(&srq->free);
        wait_for_completion(&srq->free);
 
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+       mlx4_srq_free_icm(dev, srq->srqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_free);
 
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&srq_table->lock);
        INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
                               dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
index d10c2e15f4ed026aacdd101dd5bd82dbba985e01..1ea811cf515bd11e9bd1fa0f6d46fd5bce444c0b 100644 (file)
@@ -42,6 +42,8 @@ config KS8851
        select NET_CORE
        select MII
        select CRC32
+       select MISC_DEVICES
+       select EEPROM_93CX6
        ---help---
          SPI driver for Micrel KS8851 SPI attached network chip.
 
index 4a6ae057e3b1ad408335c47644647bc1207ac70f..75ec87a822b8e2d1ee12486bc6cfcbf0f3e0a248 100644 (file)
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
        .remove         = ks8842_remove,
 };
 
-static int __init ks8842_init(void)
-{
-       return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
-       platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
 
 MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
index f56743a28fc0bfb219971cd17b97a7c4127e7d67..6b35e7da9a9c7a094f27ff725787865b7bd034e6 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/cache.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
 
 #include <linux/spi/spi.h>
 
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
  * @rc_ccr: Cached copy of KS_CCR.
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
        struct spi_message      spi_msg2;
        struct spi_transfer     spi_xfer1;
        struct spi_transfer     spi_xfer2[2];
+
+       struct eeprom_93cx6     eeprom;
 };
 
 static int msg_enable;
@@ -342,6 +346,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
        mdelay(1);      /* wait for condition to clear */
 }
 
+/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+       unsigned pmecr;
+
+       netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+       pmecr = ks8851_rdreg16(ks, KS_PMECR);
+       pmecr &= ~PMECR_PM_MASK;
+       pmecr |= pwrmode;
+
+       ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
 /**
  * ks8851_write_mac_addr - write mac address to device registers
  * @dev: The network device
@@ -358,30 +382,63 @@ static int ks8851_write_mac_addr(struct net_device *dev)
 
        mutex_lock(&ks->lock);
 
+       /*
+        * Wake up chip in case it was powered off when stopped; otherwise,
+        * the first write to the MAC address does not take effect.
+        */
+       ks8851_set_powermode(ks, PMECR_PM_NORMAL);
        for (i = 0; i < ETH_ALEN; i++)
                ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+       if (!netif_running(dev))
+               ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
 
        mutex_unlock(&ks->lock);
 
        return 0;
 }
 
+/**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+*/
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+       int i;
+
+       mutex_lock(&ks->lock);
+
+       for (i = 0; i < ETH_ALEN; i++)
+               dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+       mutex_unlock(&ks->lock);
+}
+
 /**
  * ks8851_init_mac - initialise the mac address
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
  * to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
        struct net_device *dev = ks->netdev;
 
+       /* first, try reading what we've got already */
+       if (ks->rc_ccr & CCR_EEPROM) {
+               ks8851_read_mac_addr(dev);
+               if (is_valid_ether_addr(dev->dev_addr))
+                       return;
+
+               netdev_err(ks->netdev, "invalid mac address read %pM\n",
+                               dev->dev_addr);
+       }
+
        random_ether_addr(dev->dev_addr);
        ks8851_write_mac_addr(dev);
 }
@@ -738,26 +795,6 @@ static void ks8851_tx_work(struct work_struct *work)
        mutex_unlock(&ks->lock);
 }
 
-/**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
-       unsigned pmecr;
-
-       netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
-       pmecr = ks8851_rdreg16(ks, KS_PMECR);
-       pmecr &= ~PMECR_PM_MASK;
-       pmecr |= pwrmode;
-
-       ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
 /**
  * ks8851_net_open - open network device
  * @dev: The network device being opened.
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
-       EEPROM_CONTROL,
-       EEPROM_ADDRESS,
-       EEPROM_DATA,
-       EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- *  - on period start: set clock high and read value on bus
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       int eepcr;
-       int ctrl = EEPROM_OP_READ;
-       int state = EEPROM_CONTROL;
-       int bit_count = EEPROM_OP_LEN - 1;
-       unsigned int data = 0;
-       int dummy;
-       unsigned int addr_len;
-
-       addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-       /* start transaction: chip select high, authorize write */
-       mutex_lock(&ks->lock);
-       eepcr = EEPCR_EESA | EEPCR_EESRWA;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr |= EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       while (state != EEPROM_COMPLETE) {
-               /* falling clock period starts... */
-               /* set EED_IO pin for control and address */
-               eepcr &= ~EEPCR_EEDO;
-               switch (state) {
-               case EEPROM_CONTROL:
-                       eepcr |= ((ctrl >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               bit_count = addr_len - 1;
-                               state = EEPROM_ADDRESS;
-                       }
-                       break;
-               case EEPROM_ADDRESS:
-                       eepcr |= ((addr >> bit_count) & 1) << 2;
-                       bit_count--;
-                       break;
-               case EEPROM_DATA:
-                       /* Change to receive mode */
-                       eepcr &= ~EEPCR_EESRWA;
-                       break;
-               }
-
-               /* lower clock  */
-               eepcr &= ~EEPCR_EESCK;
-
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* waitread period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-
-               /* rising clock period starts... */
-
-               /* raise clock */
-               mutex_lock(&ks->lock);
-               eepcr |= EEPCR_EESCK;
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* Manage read */
-               switch (state) {
-               case EEPROM_ADDRESS:
-                       if (bit_count < 0) {
-                               bit_count = EEPROM_DATA_LEN - 1;
-                               state = EEPROM_DATA;
-                       }
-                       break;
-               case EEPROM_DATA:
-                       mutex_lock(&ks->lock);
-                       dummy = ks8851_rdreg16(ks, KS_EEPCR);
-                       mutex_unlock(&ks->lock);
-                       data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
-                       if (bit_count-- <= 0)
-                               state = EEPROM_COMPLETE;
-                       break;
-               }
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-       }
-
-       /* close transaction */
-       mutex_lock(&ks->lock);
-       eepcr &= ~EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr = 0;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- *  - on period start: set clock high
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
-                                       unsigned int addr, unsigned int data)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       int eepcr;
-       int state = EEPROM_CONTROL;
-       int bit_count = EEPROM_OP_LEN - 1;
-       unsigned int addr_len;
-
-       addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-       switch (op) {
-       case EEPROM_OP_EWEN:
-               addr = 0x30;
-       break;
-       case EEPROM_OP_EWDS:
-               addr = 0;
-               break;
-       }
-
-       /* start transaction: chip select high, authorize write */
-       mutex_lock(&ks->lock);
-       eepcr = EEPCR_EESA | EEPCR_EESRWA;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr |= EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       while (state != EEPROM_COMPLETE) {
-               /* falling clock period starts... */
-               /* set EED_IO pin for control and address */
-               eepcr &= ~EEPCR_EEDO;
-               switch (state) {
-               case EEPROM_CONTROL:
-                       eepcr |= ((op >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               bit_count = addr_len - 1;
-                               state = EEPROM_ADDRESS;
-                       }
-                       break;
-               case EEPROM_ADDRESS:
-                       eepcr |= ((addr >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               if (op == EEPROM_OP_WRITE) {
-                                       bit_count = EEPROM_DATA_LEN - 1;
-                                       state = EEPROM_DATA;
-                               } else {
-                                       state = EEPROM_COMPLETE;
-                               }
-                       }
-                       break;
-               case EEPROM_DATA:
-                       eepcr |= ((data >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0)
-                               state = EEPROM_COMPLETE;
-                       break;
-               }
-
-               /* lower clock  */
-               eepcr &= ~EEPCR_EESCK;
-
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-
-               /* rising clock period starts... */
-
-               /* raise clock */
-               eepcr |= EEPCR_EESCK;
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-       }
-
-       /* close transaction */
-       mutex_lock(&ks->lock);
-       eepcr &= ~EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr = 0;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-}
-
 /* ethtool support */
 
 static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
        return mii_nway_restart(&ks->mii);
 }
 
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       return ks->eeprom_size;
-}
+/* EEPROM support */
 
-static int ks8851_get_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *bytes)
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
 {
-       struct ks8851_net *ks = netdev_priv(dev);
-       u16 *eeprom_buff;
-       int first_word;
-       int last_word;
-       int ret_val = 0;
-       u16 i;
-
-       if (eeprom->len == 0)
-               return -EINVAL;
+       struct ks8851_net *ks = ee->data;
+       unsigned val;
 
-       if (eeprom->len > ks->eeprom_size)
-               return -EINVAL;
+       val = ks8851_rdreg16(ks, KS_EEPCR);
 
-       eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+       ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+       ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+       ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
 
-       first_word = eeprom->offset >> 1;
-       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+       struct ks8851_net *ks = ee->data;
+       unsigned val = EEPCR_EESA;      /* default - eeprom access on */
+
+       if (ee->drive_data)
+               val |= EEPCR_EESRWA;
+       if (ee->reg_data_in)
+               val |= EEPCR_EEDO;
+       if (ee->reg_data_clock)
+               val |= EEPCR_EESCK;
+       if (ee->reg_chip_select)
+               val |= EEPCR_EECS;
+
+       ks8851_wrreg16(ks, KS_EEPCR, val);
+}
 
-       eeprom_buff = kmalloc(sizeof(u16) *
-                       (last_word - first_word + 1), GFP_KERNEL);
-       if (!eeprom_buff)
-               return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+       if (!(ks->rc_ccr & CCR_EEPROM))
+               return -ENOENT;
 
-       for (i = 0; i < last_word - first_word + 1; i++)
-               eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+       mutex_lock(&ks->lock);
 
-       /* Device's eeprom is little-endian, word addressable */
-       for (i = 0; i < last_word - first_word + 1; i++)
-               le16_to_cpus(&eeprom_buff[i]);
+       /* start with clock low, cs high */
+       ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+       return 0;
+}
 
-       memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
-       kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+       unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
 
-       return ret_val;
+       ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+       mutex_unlock(&ks->lock);
 }
 
+#define KS_EEPROM_MAGIC (0x00008851)
+
 static int ks8851_set_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *bytes)
+                            struct ethtool_eeprom *ee, u8 *data)
 {
        struct ks8851_net *ks = netdev_priv(dev);
-       u16 *eeprom_buff;
-       void *ptr;
-       int max_len;
-       int first_word;
-       int last_word;
-       int ret_val = 0;
-       u16 i;
-
-       if (eeprom->len == 0)
-               return -EOPNOTSUPP;
-
-       if (eeprom->len > ks->eeprom_size)
+       int offset = ee->offset;
+       int len = ee->len;
+       u16 tmp;
+
+       /* currently only support byte writing */
+       if (len != 1)
                return -EINVAL;
 
-       if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
-               return -EFAULT;
+       if (ee->magic != KS_EEPROM_MAGIC)
+               return -EINVAL;
 
-       first_word = eeprom->offset >> 1;
-       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-       max_len = (last_word - first_word + 1) * 2;
-       eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-       if (!eeprom_buff)
-               return -ENOMEM;
+       if (ks8851_eeprom_claim(ks))
+               return -ENOENT;
 
-       ptr = (void *)eeprom_buff;
+       eeprom_93cx6_wren(&ks->eeprom, true);
 
-       if (eeprom->offset & 1) {
-               /* need read/modify/write of first changed EEPROM word */
-               /* only the second byte of the word is being modified */
-               eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
-               ptr++;
+       /* ethtool currently only supports writing bytes, which means
+        * we have to read/modify/write our 16bit EEPROMs */
+
+       eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+       if (offset & 1) {
+               tmp &= 0xff;
+               tmp |= *data << 8;
+       } else {
+               tmp &= 0xff00;
+               tmp |= *data;
        }
-       if ((eeprom->offset + eeprom->len) & 1)
-               /* need read/modify/write of last changed EEPROM word */
-               /* only the first byte of the word is being modified */
-               eeprom_buff[last_word - first_word] =
-                                       ks8851_eeprom_read(dev, last_word);
 
+       eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+       eeprom_93cx6_wren(&ks->eeprom, false);
 
-       /* Device's eeprom is little-endian, word addressable */
-       le16_to_cpus(&eeprom_buff[0]);
-       le16_to_cpus(&eeprom_buff[last_word - first_word]);
+       ks8851_eeprom_release(ks);
 
-       memcpy(ptr, bytes, eeprom->len);
+       return 0;
+}
 
-       for (i = 0; i < last_word - first_word + 1; i++)
-               eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+                            struct ethtool_eeprom *ee, u8 *data)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+       int offset = ee->offset;
+       int len = ee->len;
 
-       ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+       /* must be 2 byte aligned */
+       if (len & 1 || offset & 1)
+               return -EINVAL;
 
-       for (i = 0; i < last_word - first_word + 1; i++) {
-               ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
-                                                       eeprom_buff[i]);
-               mdelay(EEPROM_WRITE_TIME);
-       }
+       if (ks8851_eeprom_claim(ks))
+               return -ENOENT;
+
+       ee->magic = KS_EEPROM_MAGIC;
 
-       ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+       eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+       ks8851_eeprom_release(ks);
 
-       kfree(eeprom_buff);
-       return ret_val;
+       return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+
+       /* currently, we assume it is an 93C46 attached, so return 128 */
+       return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
 }
 
 static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
        spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
        spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
 
+       /* setup EEPROM state */
+
+       ks->eeprom.data = ks;
+       ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+       ks->eeprom.register_read = ks8851_eeprom_regread;
+       ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
        /* setup mii state */
        ks->mii.dev             = ndev;
        ks->mii.phy_id          = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
                goto err_netdev;
        }
 
-       netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+       netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
                    CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
-                   ndev->dev_addr, ndev->irq);
+                   ndev->dev_addr, ndev->irq,
+                   ks->rc_ccr & CCR_EEPROM ? "has" : "no");
 
        return 0;
 
index 537fb06e593200144c8b1d09c1aae732416748e1..b0fae86aacad05315f55294109a605388d0f0192 100644 (file)
@@ -16,7 +16,7 @@
 #define CCR_32PIN                              (1 << 0)
 
 /* MAC address registers */
-#define KS_MAR(_m)                             0x15 - (_m)
+#define KS_MAR(_m)                             (0x15 - (_m))
 #define KS_MARL                                        0x10
 #define KS_MARM                                        0x12
 #define KS_MARH                                        0x14
 #define KS_EEPCR                               0x22
 #define EEPCR_EESRWA                           (1 << 5)
 #define EEPCR_EESA                             (1 << 4)
-#define EEPCR_EESB_OFFSET                      3
-#define EEPCR_EESB                             (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB                             (1 << 3)
 #define EEPCR_EEDO                             (1 << 2)
 #define EEPCR_EESCK                            (1 << 1)
 #define EEPCR_EECS                             (1 << 0)
 
-#define EEPROM_OP_LEN                          3       /* bits:*/
-#define EEPROM_OP_READ                         0x06
-#define EEPROM_OP_EWEN                         0x04
-#define EEPROM_OP_WRITE                                0x05
-#define EEPROM_OP_EWDS                         0x14
-
-#define EEPROM_DATA_LEN                                16      /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME                      4       /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD                       400     /* in us */
-
 #define KS_MBIR                                        0x24
 #define MBIR_TXMBF                             (1 << 12)
 #define MBIR_TXMBFA                            (1 << 11)
index d19c849059d80755cacb8093de0668776216401d..e58e78e5c930048d0dccf4202478b7da4d227b91 100644 (file)
@@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks)
        ks->all_mcast = 0;
        ks->mcast_lst_size = 0;
 
-       ks->frame_head_info = (struct type_frame_head *) \
-               kmalloc(MHEADER_SIZE, GFP_KERNEL);
+       ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
        if (!ks->frame_head_info) {
                pr_err("Error: Fail to allocate frame memory\n");
                return false;
@@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = {
        .remove = __devexit_p(ks8851_remove),
 };
 
-static int __init ks8851_init(void)
-{
-       return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
-       platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
 
 MODULE_DESCRIPTION("KS8851 MLL Network driver");
 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
index 7ece990381c8ff957fe4772a85ed4e71444de735..a718865a8fed29185ae5fcc3f82c8d13e410a99a 100644 (file)
 /* Change default LED mode. */
 #define SET_DEFAULT_LED                        LED_SPEED_DUPLEX_ACT
 
-#define MAC_ADDR_LEN                   6
-#define MAC_ADDR_ORDER(i)              (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i)              (ETH_ALEN - 1 - (i))
 
 #define MAX_ETHERNET_BODY_SIZE         1500
 #define ETHERNET_HEADER_SIZE           14
@@ -1043,7 +1042,7 @@ enum {
  * @valid:     Valid setting indicating the entry is being used.
  */
 struct ksz_mac_table {
-       u8 mac_addr[MAC_ADDR_LEN];
+       u8 mac_addr[ETH_ALEN];
        u16 vid;
        u8 fid;
        u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
        u8 diffserv[DIFFSERV_ENTRIES];
        u8 p_802_1p[PRIO_802_1P_ENTRIES];
 
-       u8 br_addr[MAC_ADDR_LEN];
-       u8 other_addr[MAC_ADDR_LEN];
+       u8 br_addr[ETH_ALEN];
+       u8 other_addr[ETH_ALEN];
 
        u8 broad_per;
        u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
        int tx_int_mask;
        int tx_size;
 
-       u8 perm_addr[MAC_ADDR_LEN];
-       u8 override_addr[MAC_ADDR_LEN];
-       u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+       u8 perm_addr[ETH_ALEN];
+       u8 override_addr[ETH_ALEN];
+       u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
        u8 addr_list_size;
        u8 mac_override;
        u8 promiscuous;
        u8 all_multi;
-       u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+       u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
        u8 multi_bits[HW_MULTICAST_SIZE];
        u8 multi_list_size;
 
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
        static const u8 mask[] = { 0x3F };
        static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 
-       hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+       hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
 }
 
 /**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
 {
        static const u8 mask[] = { 0x3F };
 
-       hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+       hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
 }
 
 /**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
 {
        int i;
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
                        hw->io + KS884X_ADDR_0_OFFSET + i);
 
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
 {
        int i;
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
                        KS884X_ADDR_0_OFFSET + i);
 
        if (!hw->mac_override) {
-               memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+               memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
                if (empty_addr(hw->override_addr)) {
-                       memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
-                               MAC_ADDR_LEN);
+                       memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
                        memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
-                               MAC_ADDR_LEN);
+                              ETH_ALEN);
                        hw->override_addr[5] += hw->id;
                        hw_set_addr(hw);
                }
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
        int i;
        int j = ADDITIONAL_ENTRIES;
 
-       if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+       if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
                return 0;
        for (i = 0; i < hw->addr_list_size; i++) {
-               if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+               if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
                        return 0;
                if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
                        j = i;
        }
        if (j < ADDITIONAL_ENTRIES) {
-               memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+               memcpy(hw->address[j], mac_addr, ETH_ALEN);
                hw_ena_add_addr(hw, j, hw->address[j]);
                return 0;
        }
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
        int i;
 
        for (i = 0; i < hw->addr_list_size; i++) {
-               if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
-                       memset(hw->address[i], 0, MAC_ADDR_LEN);
+               if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+                       memset(hw->address[i], 0, ETH_ALEN);
                        writel(0, hw->io + ADD_ADDR_INCR * i +
                                KS_ADD_ADDR_0_HI);
                        return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
  */
 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
 {
-       desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
-               GFP_KERNEL);
+       desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+                                 GFP_KERNEL);
        if (!desc_info->ring)
                return 1;
-       memset((void *) desc_info->ring, 0,
-               sizeof(struct ksz_desc) * desc_info->alloc);
        hw_init_desc(desc_info, transmit);
        return 0;
 }
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
                hw_del_addr(hw, dev->dev_addr);
        else {
                hw->mac_override = 1;
-               memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+               memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
        }
 
        memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev) {
                        if (i >= MAX_MULTICAST_LIST)
                                break;
-                       memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+                       memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
                }
                hw->multi_list_size = (u8) i;
                hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(hw_priv->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+               sizeof(info->bus_info));
 }
 
 /**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
  *
  * Return 0 if successful; otherwise an error code.
  */
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
        int num;
 
        i = j = num = got_num = 0;
-       while (j < MAC_ADDR_LEN) {
+       while (j < ETH_ALEN) {
                if (macaddr[i]) {
                        int digit;
 
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
                }
                i++;
        }
-       if (MAC_ADDR_LEN == j) {
+       if (ETH_ALEN == j) {
                if (MAIN_PORT == port)
                        hw_priv->hw.mac_override = 1;
        }
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
 
        /* Multiple device interfaces mode requires a second MAC address. */
        if (hw->dev_count > 1) {
-               memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+               memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
                read_other_addr(hw);
                if (mac1addr[0] != ':')
                        get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
                dev->irq = pdev->irq;
                if (MAIN_PORT == i)
                        memcpy(dev->dev_addr, hw_priv->hw.override_addr,
-                               MAC_ADDR_LEN);
+                              ETH_ALEN);
                else {
-                       memcpy(dev->dev_addr, sw->other_addr,
-                               MAC_ADDR_LEN);
+                       memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
                        if (!memcmp(sw->other_addr, hw->override_addr,
-                                       MAC_ADDR_LEN))
+                                   ETH_ALEN))
                                dev->dev_addr[5] += port->first_port;
                }
 
index 0778edcf7b9a1ecee3ffcfcfd058043c317ce194..20b72ecb020a5039f86518046f3072845070b770 100644 (file)
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         * access to avoid theoretical race condition with functions that
         * change NETIF_F_LRO flag at runtime.
         */
-       bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+       bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
 
        while (rx_done->entry[idx].length != 0 && work_done < budget) {
                length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
        return 0;
 }
 
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;
index 11be150e4d67fc1cb70906535a9356dd38051ee6..b7fc26c4f73892fe177ab4f6527d349ca0afc06d 100644 (file)
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
        MXGEFW_CMD_GET_DCA_OFFSET = 56,
        /* offset of dca control for WDMAs */
 
-       /* VMWare NetQueue commands */
+       /* VMware NetQueue commands */
        MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
        MXGEFW_CMD_NETQ_ADD_FILTER = 58,
        /* data0 = filter_id << 16 | queue << 8 | type */
index fc7c6a932ad95b8e19e0f977f0144842ef439bd9..5b89fd377ae3723c0bfed198c8a145dda867b53f 100644 (file)
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
        },
 };
 
-static int __init jazz_sonic_init_module(void)
-{
-       return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
-       platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
index a2eacbfb4252fa0f8da5dee5928abc380f3a11e0..70367d76fc8dc56467819bbc249c95e19d0ac8f9 100644 (file)
@@ -643,15 +643,4 @@ static struct platform_driver mac_sonic_driver = {
        },
 };
 
-static int __init mac_sonic_init_module(void)
-{
-       return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
-       platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
index 6ca047aab7938f3e0b79a3ceccbd5355491bf503..ac7b16b6e7af34d4ce91e068b47c2216e773f569 100644 (file)
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_regs_len(struct net_device *dev)
index 2b8f64ddfb5530ac98ba06dac76dfe40081adde2..c24b46cbfe27af958ffc6a4b1603cfbb4ea74928 100644 (file)
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
 static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
 {
        struct ns83820 *dev = PRIV(ndev);
-       strcpy(info->driver, "ns83820");
-       strcpy(info->version, VERSION);
-       strcpy(info->bus_info, pci_name(dev->pci_dev));
+       strlcpy(info->driver, "ns83820", sizeof(info->driver));
+       strlcpy(info->version, VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
 }
 
 static u32 ns83820_get_link(struct net_device *ndev)
index ccf61b9da8d1f59bd0be03a8e2d394683bdfa174..e01c0a07a93ac15f490021b34e321f64433537b6 100644 (file)
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
        },
 };
 
-static int __init xtsonic_init(void)
-{
-       return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
-       platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
index c27fb3dda9f49d7dfc6ac9c87c0f9ff6994323cd..97f63e12d86e21799f9473eee76d6052952b5fbb 100644 (file)
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
 {
        struct s2io_nic *sp = netdev_priv(dev);
 
-       strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
-       strncpy(info->version, s2io_driver_version, sizeof(info->version));
-       strncpy(info->fw_version, "", sizeof(info->fw_version));
-       strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+       strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+       strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
        info->regdump_len = XENA_REG_SPACE;
        info->eedump_len = XENA_EEPROM_SPACE;
 }
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
        }
 }
 
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct s2io_nic *sp = netdev_priv(dev);
-       u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+       netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
 
        if (changed && netif_running(dev)) {
                int rc;
index a83197d757c1e4557dc1b883fa341138d0fd3951..ef76725454d250126b18d98998deca4b2fb7212d 100644 (file)
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
        mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
 }
 
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        /* Enabling RTH requires some of the logic in vxge_device_register and a
         * vpath reset.  Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct vxgedev *vdev = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (!(changed & NETIF_F_RXHASH))
                return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
  *
  * Add the vlan id to the devices vlan id table
  */
-static void
+static int
 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                vxge_hw_vpath_vid_add(vpath->handle, vid);
        }
        set_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 /**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
  *
  * Remove the vlan id from the device's vlan id table
  */
-static void
+static int
 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
        clear_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 static const struct net_device_ops vxge_netdev_ops = {
index f1bfb8f8fcf01759ec711b80a413780e0eea030c..b75a0497d58dbdbdb362d8e857ce8f75bf7e22f9 100644 (file)
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
        },
 };
 
-static int __init w90p910_ether_init(void)
-{
-       return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
-       platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
index 1dca57013cb2d868f3881a528a3dccc27e7605a9..4c4e7f4583830ce9231bd1ebef8ab3a1fed99fa2 100644 (file)
@@ -65,7 +65,8 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/prefetch.h>
-#include  <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
@@ -609,7 +610,7 @@ struct nv_ethtool_str {
 };
 
 static const struct nv_ethtool_str nv_estats_str[] = {
-       { "tx_bytes" },
+       { "tx_bytes" }, /* includes Ethernet FCS CRC */
        { "tx_zero_rexmt" },
        { "tx_one_rexmt" },
        { "tx_many_rexmt" },
@@ -637,7 +638,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
        /* version 2 stats */
        { "tx_deferral" },
        { "tx_packets" },
-       { "rx_bytes" },
+       { "rx_bytes" }, /* includes Ethernet FCS CRC */
        { "tx_pause" },
        { "rx_pause" },
        { "rx_drop_frame" },
@@ -649,7 +650,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
 };
 
 struct nv_ethtool_stats {
-       u64 tx_bytes;
+       u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
        u64 tx_zero_rexmt;
        u64 tx_one_rexmt;
        u64 tx_many_rexmt;
@@ -670,14 +671,14 @@ struct nv_ethtool_stats {
        u64 rx_unicast;
        u64 rx_multicast;
        u64 rx_broadcast;
-       u64 rx_packets;
+       u64 rx_packets; /* should be ifconfig->rx_packets */
        u64 rx_errors_total;
        u64 tx_errors_total;
 
        /* version 2 stats */
        u64 tx_deferral;
-       u64 tx_packets;
-       u64 rx_bytes;
+       u64 tx_packets; /* should be ifconfig->tx_packets */
+       u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
        u64 tx_pause;
        u64 rx_pause;
        u64 rx_drop_frame;
@@ -736,6 +737,16 @@ struct nv_skb_map {
  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *     needs netdev_priv(dev)->lock :-(
  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ *   integer wraparound in the NIC stats registers, at low frequency
+ *   (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
  */
 
 /* in dev: base, irq */
@@ -745,9 +756,10 @@ struct fe_priv {
        struct net_device *dev;
        struct napi_struct napi;
 
-       /* General data:
-        * Locking: spin_lock(&np->lock); */
+       /* hardware stats are updated in syscall and timer */
+       spinlock_t hwstats_lock;
        struct nv_ethtool_stats estats;
+
        int in_shutdown;
        u32 linkspeed;
        int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
        u32 nic_poll_irq;
        int rx_ring_size;
 
+       /* RX software stats */
+       struct u64_stats_sync swstats_rx_syncp;
+       u64 stat_rx_packets;
+       u64 stat_rx_bytes; /* not always available in HW */
+       u64 stat_rx_missed_errors;
+       u64 stat_rx_dropped;
+
        /* media detection workaround.
         * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
         */
@@ -820,6 +839,12 @@ struct fe_priv {
        struct nv_skb_map *tx_end_flip;
        int tx_stop;
 
+       /* TX software stats */
+       struct u64_stats_sync swstats_tx_syncp;
+       u64 stat_tx_packets; /* not always available in HW */
+       u64 stat_tx_bytes;
+       u64 stat_tx_dropped;
+
        /* msi/msi-x fields */
        u32 msi_flags;
        struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -891,6 +916,11 @@ enum {
 };
 static int dma_64bit = NV_DMA_64BIT_ENABLED;
 
+/*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
 /*
  * Crossover Detection
  * Realtek 8201 phy + some OEM boards do not work properly.
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
        pci_push(base);
 }
 
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
+       /* If it happens that this is run in top-half context, then
+        * replace the spin_lock of hwstats_lock with
+        * spin_lock_irqsave() in calling functions. */
+       WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+       assert_spin_locked(&np->hwstats_lock);
+
+       /* query hardware */
        np->estats.tx_bytes += readl(base + NvRegTxCnt);
        np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
        np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,33 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
 }
 
 /*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
  * Called with read_lock(&dev_base_lock) held for read -
  * only synchronized against unregister_netdevice.
  */
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct fe_priv *np = netdev_priv(dev);
+       unsigned int syncp_start;
+
+       /*
+        * Note: because HW stats are not always available and for
+        * consistency reasons, the following ifconfig stats are
+        * managed by software: rx_bytes, tx_bytes, rx_packets and
+        * tx_packets. The related hardware stats reported by ethtool
+        * should be equivalent to these ifconfig stats, with 4
+        * additional bytes per packet (Ethernet FCS CRC), except for
+        * tx_packets when TSO kicks in.
+        */
+
+       /* software stats */
+       do {
+               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+               storage->rx_packets       = np->stat_rx_packets;
+               storage->rx_bytes         = np->stat_rx_bytes;
+               storage->rx_dropped       = np->stat_rx_dropped;
+               storage->rx_missed_errors = np->stat_rx_missed_errors;
+       } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+       do {
+               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+               storage->tx_packets = np->stat_tx_packets;
+               storage->tx_bytes   = np->stat_tx_bytes;
+               storage->tx_dropped = np->stat_tx_dropped;
+       } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
 
        /* If the nic supports hw counters then retrieve latest values */
-       if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
-               nv_get_hw_stats(dev);
-
-               /* copy to net_device stats */
-               dev->stats.tx_packets = np->estats.tx_packets;
-               dev->stats.rx_bytes = np->estats.rx_bytes;
-               dev->stats.tx_bytes = np->estats.tx_bytes;
-               dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
-               dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
-               dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
-               dev->stats.rx_over_errors = np->estats.rx_over_errors;
-               dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
-               dev->stats.rx_errors = np->estats.rx_errors_total;
-               dev->stats.tx_errors = np->estats.tx_errors_total;
-       }
-
-       return &dev->stats;
+       if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+               spin_lock_bh(&np->hwstats_lock);
+
+               nv_update_stats(dev);
+
+               /* generic stats */
+               storage->rx_errors = np->estats.rx_errors_total;
+               storage->tx_errors = np->estats.tx_errors_total;
+
+               /* meaningful only when NIC supports stats v3 */
+               storage->multicast = np->estats.rx_multicast;
+
+               /* detailed rx_errors */
+               storage->rx_length_errors = np->estats.rx_length_error;
+               storage->rx_over_errors   = np->estats.rx_over_errors;
+               storage->rx_crc_errors    = np->estats.rx_crc_errors;
+               storage->rx_frame_errors  = np->estats.rx_frame_align_error;
+               storage->rx_fifo_errors   = np->estats.rx_drop_frame;
+
+               /* detailed tx_errors */
+               storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+               storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
+
+               spin_unlock_bh(&np->hwstats_lock);
+       }
+
+       return storage;
 }
 
 /*
@@ -1752,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
                                np->put_rx.orig = np->first_rx.orig;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else
+               } else {
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_dropped++;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
+               }
        }
        return 0;
 }
@@ -1784,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                np->put_rx.ex = np->first_rx.ex;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else
+               } else {
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_dropped++;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
+               }
        }
        return 0;
 }
@@ -1842,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
                np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
        np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
        np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+       netdev_reset_queue(np->dev);
        np->tx_pkts_in_progress = 0;
        np->tx_change_owner = NULL;
        np->tx_end_flip = NULL;
@@ -1920,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
                        np->tx_ring.ex[i].bufhigh = 0;
                        np->tx_ring.ex[i].buflow = 0;
                }
-               if (nv_release_txskb(np, &np->tx_skb[i]))
-                       dev->stats.tx_dropped++;
+               if (nv_release_txskb(np, &np->tx_skb[i])) {
+                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                       np->stat_tx_dropped++;
+                       u64_stats_update_end(&np->swstats_tx_syncp);
+               }
                np->tx_skb[i].dma = 0;
                np->tx_skb[i].dma_len = 0;
                np->tx_skb[i].dma_single = 0;
@@ -2187,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* set tx flags */
        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+       netdev_sent_queue(np->dev, skb->len);
+
        np->put_tx.orig = put_tx;
 
        spin_unlock_irqrestore(&np->lock, flags);
@@ -2331,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
        /* set tx flags */
        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+       netdev_sent_queue(np->dev, skb->len);
+
        np->put_tx.ex = put_tx;
 
        spin_unlock_irqrestore(&np->lock, flags);
@@ -2368,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
        u32 flags;
        int tx_work = 0;
        struct ring_desc *orig_get_tx = np->get_tx.orig;
+       unsigned int bytes_compl = 0;
 
        while ((np->get_tx.orig != np->put_tx.orig) &&
               !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2378,9 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
                if (np->desc_ver == DESC_VER_1) {
                        if (flags & NV_TX_LASTPACKET) {
                                if (flags & NV_TX_ERROR) {
-                                       if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+                                       if ((flags & NV_TX_RETRYERROR)
+                                           && !(flags & NV_TX_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
+                               } else {
+                                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                                       np->stat_tx_packets++;
+                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_end(&np->swstats_tx_syncp);
                                }
+                               bytes_compl += np->get_tx_ctx->skb->len;
                                dev_kfree_skb_any(np->get_tx_ctx->skb);
                                np->get_tx_ctx->skb = NULL;
                                tx_work++;
@@ -2388,9 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
                } else {
                        if (flags & NV_TX2_LASTPACKET) {
                                if (flags & NV_TX2_ERROR) {
-                                       if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+                                       if ((flags & NV_TX2_RETRYERROR)
+                                           && !(flags & NV_TX2_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
+                               } else {
+                                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                                       np->stat_tx_packets++;
+                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_end(&np->swstats_tx_syncp);
                                }
+                               bytes_compl += np->get_tx_ctx->skb->len;
                                dev_kfree_skb_any(np->get_tx_ctx->skb);
                                np->get_tx_ctx->skb = NULL;
                                tx_work++;
@@ -2401,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
                        np->get_tx_ctx = np->first_tx_ctx;
        }
+
+       netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
        if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
                np->tx_stop = 0;
                netif_wake_queue(dev);
@@ -2414,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
        u32 flags;
        int tx_work = 0;
        struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+       unsigned long bytes_cleaned = 0;
 
        while ((np->get_tx.ex != np->put_tx.ex) &&
               !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2423,14 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 
                if (flags & NV_TX2_LASTPACKET) {
                        if (flags & NV_TX2_ERROR) {
-                               if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+                               if ((flags & NV_TX2_RETRYERROR)
+                                   && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
                                        if (np->driver_data & DEV_HAS_GEAR_MODE)
                                                nv_gear_backoff_reseed(dev);
                                        else
                                                nv_legacybackoff_reseed(dev);
                                }
+                       } else {
+                               u64_stats_update_begin(&np->swstats_tx_syncp);
+                               np->stat_tx_packets++;
+                               np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                               u64_stats_update_end(&np->swstats_tx_syncp);
                        }
 
+                       bytes_cleaned += np->get_tx_ctx->skb->len;
                        dev_kfree_skb_any(np->get_tx_ctx->skb);
                        np->get_tx_ctx->skb = NULL;
                        tx_work++;
@@ -2438,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                        if (np->tx_limit)
                                nv_tx_flip_ownership(dev);
                }
+
                if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
                        np->get_tx.ex = np->first_tx.ex;
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
                        np->get_tx_ctx = np->first_tx_ctx;
        }
+
+       netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
        if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
                np->tx_stop = 0;
                netif_wake_queue(dev);
@@ -2461,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
        u32 status;
        union ring_type put_tx;
        int saved_tx_limit;
-       int i;
 
        if (np->msi_flags & NV_MSI_X_ENABLED)
                status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
        else
                status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-       netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+       netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
 
-       netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
-       netdev_info(dev, "Dumping tx registers\n");
-       for (i = 0; i <= np->register_size; i += 32) {
-               netdev_info(dev,
-                           "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
-                           i,
-                           readl(base + i + 0), readl(base + i + 4),
-                           readl(base + i + 8), readl(base + i + 12),
-                           readl(base + i + 16), readl(base + i + 20),
-                           readl(base + i + 24), readl(base + i + 28));
-       }
-       netdev_info(dev, "Dumping tx ring\n");
-       for (i = 0; i < np->tx_ring_size; i += 4) {
-               if (!nv_optimized(np)) {
-                       netdev_info(dev,
-                                   "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
-                                   i,
-                                   le32_to_cpu(np->tx_ring.orig[i].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+1].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+2].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+3].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
-               } else {
+       if (unlikely(debug_tx_timeout)) {
+               int i;
+
+               netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+               netdev_info(dev, "Dumping tx registers\n");
+               for (i = 0; i <= np->register_size; i += 32) {
                        netdev_info(dev,
-                                   "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+                                   "%3x: %08x %08x %08x %08x "
+                                   "%08x %08x %08x %08x\n",
                                    i,
-                                   le32_to_cpu(np->tx_ring.ex[i].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+                                   readl(base + i + 0), readl(base + i + 4),
+                                   readl(base + i + 8), readl(base + i + 12),
+                                   readl(base + i + 16), readl(base + i + 20),
+                                   readl(base + i + 24), readl(base + i + 28));
+               }
+               netdev_info(dev, "Dumping tx ring\n");
+               for (i = 0; i < np->tx_ring_size; i += 4) {
+                       if (!nv_optimized(np)) {
+                               netdev_info(dev,
+                                           "%03x: %08x %08x // %08x %08x "
+                                           "// %08x %08x // %08x %08x\n",
+                                           i,
+                                           le32_to_cpu(np->tx_ring.orig[i].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+1].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+2].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+3].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+                       } else {
+                               netdev_info(dev,
+                                           "%03x: %08x %08x %08x "
+                                           "// %08x %08x %08x "
+                                           "// %08x %08x %08x "
+                                           "// %08x %08x %08x\n",
+                                           i,
+                                           le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+                       }
                }
        }
 
@@ -2633,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
                                        }
                                        /* the rest are hard errors */
                                        else {
-                                               if (flags & NV_RX_MISSEDFRAME)
-                                                       dev->stats.rx_missed_errors++;
+                                               if (flags & NV_RX_MISSEDFRAME) {
+                                                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                                                       np->stat_rx_missed_errors++;
+                                                       u64_stats_update_end(&np->swstats_rx_syncp);
+                                               }
                                                dev_kfree_skb(skb);
                                                goto next_pkt;
                                        }
@@ -2677,7 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&np->napi, skb);
-               dev->stats.rx_packets++;
+               u64_stats_update_begin(&np->swstats_rx_syncp);
+               np->stat_rx_packets++;
+               np->stat_rx_bytes += len;
+               u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
                if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
                        np->get_rx.orig = np->first_rx.orig;
@@ -2760,7 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                __vlan_hwaccel_put_tag(skb, vid);
                        }
                        napi_gro_receive(&np->napi, skb);
-                       dev->stats.rx_packets++;
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_packets++;
+                       np->stat_rx_bytes += len;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                } else {
                        dev_kfree_skb(skb);
                }
@@ -3003,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
        }
 }
 
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       u8 __iomem *base = get_hwbase(dev);
+       u32 phyreg, txreg;
+       int mii_status;
+
+       np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+       np->duplex = duplex;
+
+       /* see if gigabit phy */
+       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+       if (mii_status & PHY_GIGABIT) {
+               np->gigabit = PHY_GIGABIT;
+               phyreg = readl(base + NvRegSlotTime);
+               phyreg &= ~(0x3FF00);
+               if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+                       phyreg |= NVREG_SLOTTIME_10_100_FULL;
+               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+                       phyreg |= NVREG_SLOTTIME_10_100_FULL;
+               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+                       phyreg |= NVREG_SLOTTIME_1000_FULL;
+               writel(phyreg, base + NvRegSlotTime);
+       }
+
+       phyreg = readl(base + NvRegPhyInterface);
+       phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+       if (np->duplex == 0)
+               phyreg |= PHY_HALF;
+       if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+               phyreg |= PHY_100;
+       else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                                       NVREG_LINKSPEED_1000)
+               phyreg |= PHY_1000;
+       writel(phyreg, base + NvRegPhyInterface);
+
+       if (phyreg & PHY_RGMII) {
+               if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                                       NVREG_LINKSPEED_1000)
+                       txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+               else
+                       txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+       } else {
+               txreg = NVREG_TX_DEFERRAL_DEFAULT;
+       }
+       writel(txreg, base + NvRegTxDeferral);
+
+       if (np->desc_ver == DESC_VER_1) {
+               txreg = NVREG_TX_WM_DESC1_DEFAULT;
+       } else {
+               if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                        NVREG_LINKSPEED_1000)
+                       txreg = NVREG_TX_WM_DESC2_3_1000;
+               else
+                       txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+       }
+       writel(txreg, base + NvRegTxWatermark);
+
+       writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+                       base + NvRegMisc1);
+       pci_push(base);
+       writel(np->linkspeed, base + NvRegLinkSpeed);
+       pci_push(base);
+
+       return;
+}
+
 /**
  * nv_update_linkspeed: Setup the MAC according to the link partner
  * @dev: Network device to be configured
@@ -3024,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
        int newls = np->linkspeed;
        int newdup = np->duplex;
        int mii_status;
+       u32 bmcr;
        int retval = 0;
        u32 control_1000, status_1000, phyreg, pause_flags, txreg;
        u32 txrxFlags = 0;
        u32 phy_exp;
 
+       /* If device loopback is enabled, set carrier on and enable max link
+        * speed.
+        */
+       bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+       if (bmcr & BMCR_LOOPBACK) {
+               if (netif_running(dev)) {
+                       nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+                       if (!netif_carrier_ok(dev))
+                               netif_carrier_on(dev);
+               }
+               return 1;
+       }
+
        /* BMSR_LSTATUS is latched, read it twice:
         * we want the current value.
         */
@@ -3711,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                writel(0, base + NvRegMSIXMap0);
                                writel(0, base + NvRegMSIXMap1);
                        }
+                       netdev_info(dev, "MSI-X enabled\n");
                }
        }
        if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3732,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                        writel(0, base + NvRegMSIMap1);
                        /* enable msi vector 0 */
                        writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+                       netdev_info(dev, "MSI enabled\n");
                }
        }
        if (ret != 0) {
@@ -3886,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
 #endif
 
 static void nv_do_stats_poll(unsigned long data)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
 
-       nv_get_hw_stats(dev);
+       /* If lock is currently taken, the stats are being refreshed
+        * and hence fresh enough */
+       if (spin_trylock(&np->hwstats_lock)) {
+               nv_update_stats(dev);
+               spin_unlock(&np->hwstats_lock);
+       }
 
        if (!np->in_shutdown)
                mod_timer(&np->stats_poll,
@@ -3900,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct fe_priv *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, FORCEDETH_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4455,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
        return 0;
 }
 
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       unsigned long flags;
+       u32 miicontrol;
+       int err, retval = 0;
+
+       spin_lock_irqsave(&np->lock, flags);
+       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+       if (features & NETIF_F_LOOPBACK) {
+               if (miicontrol & BMCR_LOOPBACK) {
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev, "Loopback already enabled\n");
+                       return 0;
+               }
+               nv_disable_irq(dev);
+               /* Turn on loopback mode */
+               miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+               err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+               if (err) {
+                       retval = PHY_ERROR;
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       phy_init(dev);
+               } else {
+                       if (netif_running(dev)) {
+                               /* Force 1000 Mbps full-duplex */
+                               nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+                                                                        1);
+                               /* Force link up */
+                               netif_carrier_on(dev);
+                       }
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev,
+                               "Internal PHY loopback mode enabled.\n");
+               }
+       } else {
+               if (!(miicontrol & BMCR_LOOPBACK)) {
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev, "Loopback already disabled\n");
+                       return 0;
+               }
+               nv_disable_irq(dev);
+               /* Turn off loopback */
+               spin_unlock_irqrestore(&np->lock, flags);
+               netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+               phy_init(dev);
+       }
+       msleep(500);
+       spin_lock_irqsave(&np->lock, flags);
+       nv_enable_irq(dev);
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /* vlan is dependent on rx checksum offload */
        if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4464,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct fe_priv *np = get_nvpriv(dev);
 
@@ -4485,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
        spin_unlock_irq(&np->lock);
 }
 
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
+       int retval;
+
+       if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+               retval = nv_set_loopback(dev, features);
+               if (retval != 0)
+                       return retval;
+       }
 
        if (changed & NETIF_F_RXCSUM) {
                spin_lock_irq(&np->lock);
@@ -4535,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+                                struct ethtool_stats *estats, u64 *buffer)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct fe_priv *np = netdev_priv(dev);
 
-       /* update stats */
-       nv_get_hw_stats(dev);
-
-       memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+       spin_lock_bh(&np->hwstats_lock);
+       nv_update_stats(dev);
+       memcpy(buffer, &np->estats,
+              nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+       spin_unlock_bh(&np->hwstats_lock);
 }
 
 static int nv_link_test(struct net_device *dev)
@@ -5124,6 +5424,12 @@ static int nv_open(struct net_device *dev)
 
        spin_unlock_irq(&np->lock);
 
+       /* If the loopback feature was set while the device was down, make sure
+        * that it's set correctly now.
+        */
+       if (dev->features & NETIF_F_LOOPBACK)
+               nv_set_loopback(dev, dev->features);
+
        return 0;
 out_drain:
        nv_drain_rxtx(dev);
@@ -5180,7 +5486,7 @@ static int nv_close(struct net_device *dev)
 static const struct net_device_ops nv_netdev_ops = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
-       .ndo_get_stats          = nv_get_stats,
+       .ndo_get_stats64        = nv_get_stats64,
        .ndo_start_xmit         = nv_start_xmit,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
@@ -5197,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
 static const struct net_device_ops nv_netdev_ops_optimized = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
-       .ndo_get_stats          = nv_get_stats,
+       .ndo_get_stats64        = nv_get_stats64,
        .ndo_start_xmit         = nv_start_xmit_optimized,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
@@ -5236,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        np->dev = dev;
        np->pci_dev = pci_dev;
        spin_lock_init(&np->lock);
+       spin_lock_init(&np->hwstats_lock);
        SET_NETDEV_DEV(dev, &pci_dev->dev);
 
        init_timer(&np->oom_kick);
@@ -5244,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        init_timer(&np->nic_poll);
        np->nic_poll.data = (unsigned long) dev;
        np->nic_poll.function = nv_do_nic_poll; /* timer handler */
-       init_timer(&np->stats_poll);
+       init_timer_deferrable(&np->stats_poll);
        np->stats_poll.data = (unsigned long) dev;
        np->stats_poll.function = nv_do_stats_poll;     /* timer handler */
 
@@ -5328,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
        dev->features |= dev->hw_features;
 
+       /* Add loopback capability to the device. */
+       dev->hw_features |= NETIF_F_LOOPBACK;
+
        np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
        if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5603,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
-       dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+       dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
                 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
                 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
                        "csum " : "",
                 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
                        "vlan " : "",
+                dev->features & (NETIF_F_LOOPBACK) ?
+                       "loopback " : "",
                 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
                 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
                 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -5982,6 +6294,9 @@ module_param(phy_cross, int, 0);
 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
 module_param(phy_power_down, int, 0);
 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+                "Dump tx related registers and ring when tx_timeout happens");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
index 8c8027176bef98cb133b0aa5e7a03f30808df42f..ac4e72d529e5360aef0ffd9f7a1353f222a1aa0c 100644 (file)
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-       strcpy(drvinfo->driver, KBUILD_MODNAME);
-       strcpy(drvinfo->version, pch_driver_version);
-       strcpy(drvinfo->fw_version, "N/A");
-       strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+       strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
 }
 
index 48406ca382f1de638e8938b4732f19406485fc83..964e9c0948bce19cf1a09ca5229575006ed1934f 100644 (file)
@@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
  * Returns
  *     0:              HW state updated successfully
  */
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (!(changed & NETIF_F_RXCSUM))
                return 0;
index 9c075ea2682e2025043d1099ad3d2c70c20b00bf..9cb5f912e4891f5b832bb941bce6916c5248bdb6 100644 (file)
@@ -18,8 +18,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  */
 
-#include <linux/module.h>      /* for __MODULE_STRING */
 #include "pch_gbe.h"
+#include <linux/module.h>      /* for __MODULE_STRING */
 
 #define OPTION_UNSET   -1
 #define OPTION_DISABLED 0
index 05db5434bafc3284eb0a3b6162a64c8ed2c436d6..90497ffb1ac39d5635d258730f8cc229275aa240 100644 (file)
@@ -2,4 +2,5 @@
 # Makefile for the A Semi network device drivers.
 #
 
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
index e09ea83b8c47efe7f7061e2f046b9a45b870b82e..8a371985319f6ef8afed68ae184329bb18aa93ee 100644 (file)
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        u32 fw_minor = 0;
        u32 fw_build = 0;
 
-       strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
-       strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+       strlcpy(drvinfo->driver, netxen_nic_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
        fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
        fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
        fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-       sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
 
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
        drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
 }
index 8cf3173ba48818edb795536d5accf7b7ff447de4..7dd9a4b107e622980620d231b8f178335b1c526e 100644 (file)
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
        adapter->set_multi(dev);
 }
 
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_RXCSUM)) {
                netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct netxen_adapter *adapter = netdev_priv(dev);
        int hw_lro;
index a4bdff438a5e5a25e6ec9338b8c371bc66ee2548..7931531c3a40be12f1d4f838fd854878c16b9470 100644 (file)
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
 {
        struct ql3_adapter *qdev = netdev_priv(ndev);
-       strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
-       strncpy(drvinfo->version, ql3xxx_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+       strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ql3xxx_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
 }
index 7ed53dbb8646fa886e0411c3f6cfba2a372e6bcf..60976fc4ccc67bb4e7131c34d399472a5e9930da 100644 (file)
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
 
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
index 8aa1c6e8667be48710bd9d1384db92266a6ecd55..cc228cf3d84bcfdf480d85332c4e6ab97ef13713 100644 (file)
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
        fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
        fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
-       sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
-       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-       strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
-       strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
+       strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
 }
 
 static int
index bcb81e47543a3f47f2e9fd59d61c3a7d2f9d2886..b528e52a8ee1388b3ac1b6aecb5e9ac2005efa08 100644 (file)
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
 }
 
 
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-               u32 changed = features ^ netdev->features;
+               netdev_features_t changed = features ^ netdev->features;
                features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
        }
 
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
 }
 
 
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
        int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
 
        if (!(changed & NETIF_F_LRO))
index 0bd163828e339fbb0c6f290167c74561a55b4471..69b8e4ef14d94ff60bf8f9e4fdee1392295d333d 100644 (file)
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
 
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
                adapter->pvid = 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        set_bit(vid, adapter->vlans);
+       return 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
        clear_bit(vid, adapter->vlans);
+       return 0;
 }
 
 static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
                struct qlcnic_esw_func_cfg *esw_cfg)
 {
        struct net_device *netdev = adapter->netdev;
-       unsigned long features, vlan_features;
+       netdev_features_t features, vlan_features;
 
        features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                        NETIF_F_IPV6_CSUM | NETIF_F_GRO);
index 8731f79c9efc40439bac7af0363225d885f42036..b8478aab050e76efa2b734572b3f866f98da321f 100644 (file)
 
 
 #define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)   /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
 #else /* all other page sizes */
 #define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
        struct ob_mac_iocb_req *queue_entry;
        u32 index;
        struct oal oal;
-       struct map_list map[MAX_SKB_FRAGS + 1];
+       struct map_list map[MAX_SKB_FRAGS + 2];
        int map_cnt;
        struct tx_ring_desc *next;
 };
index 9b67bfea035ff6082cb05c0a48fcf16f1819d607..8e2c2a74f3a5bbf05a892e30d33b2bf79fd07e5d 100644 (file)
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
-       strncpy(drvinfo->driver, qlge_driver_name, 32);
-       strncpy(drvinfo->version, qlge_driver_version, 32);
-       snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+       strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, qlge_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "v%d.%d.%d",
                 (qdev->fw_rev_id & 0x00ff0000) >> 16,
                 (qdev->fw_rev_id & 0x0000ff00) >> 8,
                 (qdev->fw_rev_id & 0x000000ff));
-       strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
index c92afcd912e23fe48a17c686e9355ba350585f15..b54898737284199183c1c6708b184763c648f057 100644 (file)
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
        }
 }
 
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
        return features;
 }
 
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+       netdev_features_t features)
 {
-       u32 changed = ndev->features ^ features;
+       netdev_features_t changed = ndev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
        return 0;
 }
 
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = MAC_ADDR_E;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to init vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_add_vid(qdev, vid);
+       err = __qlge_vlan_rx_add_vid(qdev, vid);
        set_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = 0;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to clear vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_kill_vid(qdev, vid);
+       err = __qlge_vlan_rx_kill_vid(qdev, vid);
        clear_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
 static void qlge_restore_vlan(struct ql_adapter *qdev)
index 1fc01ca72b466095b682f209b266f7209f8d3ec1..87aa43935070839e6734b55e5e4d7fc210eea213 100644 (file)
 #define DRV_VERSION    "0.28"
 #define DRV_RELDATE    "07Oct2011"
 
-/* PHY CHIP Address */
-#define PHY1_ADDR      1       /* For MAC1 */
-#define PHY2_ADDR      3       /* For MAC2 */
-#define PHY_MODE       0x3100  /* PHY CHIP Register 0 */
-#define PHY_CAP                0x01E1  /* PHY CHIP Register 4 */
-
 /* Time in jiffies before concluding the transmitter is hung. */
 #define TX_TIMEOUT     (6000 * HZ / 1000)
 
 
 /* MAC registers */
 #define MCR0           0x00    /* Control register 0 */
+#define  MCR0_RCVEN    0x0002  /* Receive enable */
 #define  MCR0_PROMISC  0x0020  /* Promiscuous mode */
 #define  MCR0_HASH_EN  0x0100  /* Enable multicast hash table function */
+#define  MCR0_XMTEN    0x1000  /* Transmission enable */
+#define  MCR0_FD       0x8000  /* Full/Half duplex */
 #define MCR1           0x04    /* Control register 1 */
 #define  MAC_RST       0x0001  /* Reset the MAC */
 #define MBCR           0x08    /* Bus control */
 #define PHY_CC         0x88    /* PHY status change configuration register */
 #define PHY_ST         0x8A    /* PHY status register */
 #define MAC_SM         0xAC    /* MAC status machine */
+#define  MAC_SM_RST    0x0002  /* MAC status machine reset */
 #define MAC_ID         0xBE    /* Identifier register */
 
 #define TX_DCNT                0x80    /* TX descriptor count */
 #define DSC_RX_MIDH_HIT        0x0004  /* RX MID table hit (no error) */
 #define DSC_RX_IDX_MID_MASK 3  /* RX mask for the index of matched MIDx */
 
-/* PHY settings */
-#define ICPLUS_PHY_ID  0x0243
-
 MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
        "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
        "Florian Fainelli <florian@openwrt.org>");
@@ -178,7 +173,7 @@ struct r6040_descriptor {
        struct r6040_descriptor *vndescp;       /* 14-17 */
        struct sk_buff *skb_ptr;        /* 18-1B */
        u32     rev2;                   /* 1C-1F */
-} __attribute__((aligned(32)));
+} __aligned(32);
 
 struct r6040_private {
        spinlock_t lock;                /* driver lock */
@@ -191,7 +186,7 @@ struct r6040_private {
        struct r6040_descriptor *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
-       u16     tx_free_desc, phy_addr;
+       u16     tx_free_desc;
        u16     mcr0, mcr1;
        struct net_device *dev;
        struct mii_bus *mii_bus;
@@ -206,8 +201,6 @@ static char version[] __devinitdata = DRV_NAME
        ": RDC R6040 NAPI net driver,"
        "version "DRV_VERSION " (" DRV_RELDATE ")";
 
-static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
-
 /* Read a word data from PHY Chip */
 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 {
@@ -379,11 +372,11 @@ static void r6040_init_mac_regs(struct net_device *dev)
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
-               if (cmd & 0x1)
+               if (cmd & MAC_RST)
                        break;
        }
        /* Reset internal state machine */
-       iowrite16(2, ioaddr + MAC_SM);
+       iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
 
@@ -409,7 +402,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
        iowrite16(INT_MASK, ioaddr + MIER);
 
        /* Enable TX and RX */
-       iowrite16(lp->mcr0 | 0x0002, ioaddr);
+       iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
 
        /* Let TX poll the descriptors
         * we may got called by r6040_tx_timeout which has left
@@ -461,7 +454,7 @@ static void r6040_down(struct net_device *dev)
        iowrite16(MAC_RST, ioaddr + MCR1);      /* Reset RDC MAC */
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
-               if (cmd & 0x1)
+               if (cmd & MAC_RST)
                        break;
        }
 
@@ -742,9 +735,10 @@ static void r6040_mac_address(struct net_device *dev)
        void __iomem *ioaddr = lp->base;
        u16 *adrp;
 
-       /* MAC operation register */
-       iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
-       iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+       /* Reset MAC */
+       iowrite16(MAC_RST, ioaddr + MCR1);
+       /* Reset internal state machine */
+       iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
 
@@ -940,7 +934,7 @@ static void r6040_multicast_list(struct net_device *dev)
        iowrite16(lp->mcr0, ioaddr + MCR0);
 
        /* Fill the MAC hash tables with their values */
-       if (lp->mcr0 && MCR0_HASH_EN) {
+       if (lp->mcr0 & MCR0_HASH_EN) {
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
                iowrite16(hash_table[2], ioaddr + MAR2);
@@ -1013,7 +1007,7 @@ static void r6040_adjust_link(struct net_device *dev)
 
        /* reflect duplex change */
        if (phydev->link && (lp->old_duplex != phydev->duplex)) {
-               lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+               lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
                iowrite16(lp->mcr0, ioaddr);
 
                status_changed = 1;
@@ -1166,8 +1160,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        lp->dev = dev;
 
        /* Init RDC private data */
-       lp->mcr0 = 0x1002;
-       lp->phy_addr = phy_table[card_idx];
+       lp->mcr0 = MCR0_XMTEN | MCR0;
 
        /* The RDC-specific entries in the device structure. */
        dev->netdev_ops = &r6040_netdev_ops;
@@ -1188,7 +1181,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        lp->mii_bus->write = r6040_mdiobus_write;
        lp->mii_bus->reset = r6040_mdiobus_reset;
        lp->mii_bus->name = "r6040_eth_mii";
-       snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+       snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+               dev_name(&pdev->dev), card_idx);
        lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
        if (!lp->mii_bus->irq) {
                dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
index ee5da9293ce00f386af069dd0919849eec161480..cc6b391479ca62ff3aaaabbe3f2c6ec09a5740df 100644 (file)
@@ -859,7 +859,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;
-       u32 tmp;
 
        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
@@ -886,11 +885,9 @@ static void __cp_set_rx_mode (struct net_device *dev)
        }
 
        /* We can safely update without stopping the chip. */
-       tmp = cp_rx_config | rx_mode;
-       if (cp->rx_config != tmp) {
-               cpw32_f (RxConfig, tmp);
-               cp->rx_config = tmp;
-       }
+       cp->rx_config = cp_rx_config | rx_mode;
+       cpw32_f(RxConfig, cp->rx_config);
+
        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
 }
@@ -1319,9 +1316,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct cp_private *cp = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(cp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
 }
 
 static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1389,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
        cp->msg_enable = value;
 }
 
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
@@ -1589,7 +1586,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
    No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
  */
 
-#define eeprom_delay() readl(ee_addr)
+#define eeprom_delay() readb(ee_addr)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_EXTEND_CMD  (4)
index 4d6b254fc6c16de399ac168490089c1cd691625f..a8779bedb3d9afbd3125a09f13ef0ce93f473f1a 100644 (file)
@@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
    No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
  */
 
-#define eeprom_delay() (void)RTL_R32(Cfg9346)
+#define eeprom_delay() (void)RTL_R8(Cfg9346)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_WRITE_CMD   (5)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct rtl8139_private *tp = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
        info->regdump_len = tp->regs_len;
 }
 
index 92b45f08858fab787a7ac3de3297f9b8c318ea67..7a0c800b50adc90051ed008548a754f1af6b5695 100644 (file)
@@ -69,9 +69,6 @@
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
 
-/* MAC address length */
-#define MAC_ADDR_LEN   6
-
 #define MAX_READ_REQUEST_SHIFT 12
 #define TX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
 #define SafeMtu                0x1c20  /* ... actually life sucks beyond ~7k */
@@ -477,7 +474,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -485,6 +481,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -1183,11 +1180,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
        return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
-       RTL_W16(IntrMask, 0x0000);
+       void __iomem *ioaddr = tp->mmio_addr;
 
-       RTL_W16(IntrStatus, 0xffff);
+       RTL_W16(IntrMask, 0x0000);
+       RTL_W16(IntrStatus, tp->intr_event);
+       RTL_R8(ChipCmd);
 }
 
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1292,7 +1291,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
                netif_carrier_off(dev);
                netif_info(tp, ifdown, dev, "link down\n");
                if (pm)
-                       pm_schedule_suspend(&tp->pci_dev->dev, 100);
+                       pm_schedule_suspend(&tp->pci_dev->dev, 5000);
        }
        spin_unlock_irqrestore(&tp->lock, flags);
 }
@@ -1404,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
        struct rtl8169_private *tp = netdev_priv(dev);
        struct rtl_fw *rtl_fw = tp->rtl_fw;
 
-       strcpy(info->driver, MODULENAME);
-       strcpy(info->version, RTL8169_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+       strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
        BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
-       strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
-              rtl_fw->version);
+       if (!IS_ERR_OR_NULL(rtl_fw))
+               strlcpy(info->fw_version, rtl_fw->version,
+                       sizeof(info->fw_version));
 }
 
 static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1553,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        return ret;
 }
 
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
@@ -1567,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
                        break;
                udelay(100);
        }
-
-       rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
@@ -4099,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        spin_lock_init(&tp->lock);
 
        /* Get MAC address */
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        /* Disable interrupts */
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        rtl_rx_close(tp);
 
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_22) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
 
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+               tp->intr_event &= ~RxFIFOOver;
+               tp->napi_event &= ~RxFIFOOver;
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
            tp->mac_version == RTL_GIGA_MAC_VER_16) {
                int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        /* Wait for any pending NAPI task to complete */
        napi_disable(&tp->napi);
 
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        tp->intr_mask = 0xffff;
        RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
        if (!netif_running(dev))
                goto out_unlock;
 
+       rtl8169_hw_reset(tp);
+
        rtl8169_wait_for_quiescence(dev);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
        rtl8169_tx_clear(tp);
+       rtl8169_init_ring_indexes(tp);
 
-       rtl8169_hw_reset(tp);
        rtl_hw_start(dev);
        netif_wake_queue(dev);
        rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl8169_hw_reset(tp);
-
-       /* Let's wait a bit while any (async) irq lands on */
        rtl8169_schedule_work(dev, rtl8169_reset_task);
 }
 
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
         */
        status = RTL_R16(IntrStatus);
        while (status && status != 0xffff) {
+               status &= tp->intr_event;
+               if (!status)
+                       break;
+
                handled = 1;
 
                /* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        switch (tp->mac_version) {
                        /* Work around for rx fifo overflow */
                        case RTL_GIGA_MAC_VER_11:
-                       case RTL_GIGA_MAC_VER_22:
-                       case RTL_GIGA_MAC_VER_26:
                                netif_stop_queue(dev);
                                rtl8169_tx_timeout(dev);
                                goto done;
-                       /* Testers needed. */
-                       case RTL_GIGA_MAC_VER_17:
-                       case RTL_GIGA_MAC_VER_19:
-                       case RTL_GIGA_MAC_VER_20:
-                       case RTL_GIGA_MAC_VER_21:
-                       case RTL_GIGA_MAC_VER_23:
-                       case RTL_GIGA_MAC_VER_24:
-                       case RTL_GIGA_MAC_VER_27:
-                       case RTL_GIGA_MAC_VER_28:
-                       case RTL_GIGA_MAC_VER_31:
-                       /* Experimental science. Pktgen proof. */
-                       case RTL_GIGA_MAC_VER_12:
-                       case RTL_GIGA_MAC_VER_25:
-                               if (status == RxFIFOOver)
-                                       goto done;
-                               break;
                        default:
                                break;
                        }
index 9b230740c6ab37cdbba640aa9435e6cee73d9add..ebfb682dfe5500f90609bdc74630320c80454c02 100644 (file)
@@ -1957,18 +1957,7 @@ static struct platform_driver sh_eth_driver = {
        },
 };
 
-static int __init sh_eth_init(void)
-{
-       return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
-       platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
 
 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
index c3673f151a41f2001a0f46fddb22473c05be3cc0..f955a19eb22f28d39435dde9aab90cdad8381ea8 100644 (file)
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
        }
 };
 
-static int __init sgiseeq_module_init(void)
-{
-       if (platform_driver_register(&sgiseeq_driver)) {
-               printk(KERN_ERR "Driver registration failed\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
-       platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
 
 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
 MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
index d5731f1fe6d67dc3713070ec4e7733f4788cc35a..e43702f33b62932ace48dd4c613f34c497c20944 100644 (file)
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
        if (efx->n_channels > 1)
                get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-               efx->rx_indir_table[i] = i % efx->n_rx_channels;
+               efx->rx_indir_table[i] =
+                       ethtool_rxfh_indir_default(i, efx->n_rx_channels);
 
        efx_set_channels(efx);
        netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
        /* Otherwise efx_start_port() will do this */
 }
 
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
                    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
         .driver_data = (unsigned long) &falcon_b0_nic_type},
-       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),  /* SFC9020 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
-       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),  /* SFL9021 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {0}                     /* end of list */
 };
index 4764793ed234ce43d222e5d895f15ae69384d9ee..a3541ac6ea01f6191cee44a7f69eb8583b11beed 100644 (file)
 #include "net_driver.h"
 #include "filter.h"
 
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID      0x0803
-#define SIENA_A_P_DEVID         0x0813
-
 /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
 #define EFX_MEM_BAR 2
 
@@ -65,13 +61,23 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 extern int efx_probe_filters(struct efx_nic *efx);
 extern void efx_restore_filters(struct efx_nic *efx);
 extern void efx_remove_filters(struct efx_nic *efx);
-extern int efx_filter_insert_filter(struct efx_nic *efx,
+extern s32 efx_filter_insert_filter(struct efx_nic *efx,
                                    struct efx_filter_spec *spec,
                                    bool replace);
-extern int efx_filter_remove_filter(struct efx_nic *efx,
-                                   struct efx_filter_spec *spec);
+extern int efx_filter_remove_id_safe(struct efx_nic *efx,
+                                    enum efx_filter_priority priority,
+                                    u32 filter_id);
+extern int efx_filter_get_filter_safe(struct efx_nic *efx,
+                                     enum efx_filter_priority priority,
+                                     u32 filter_id, struct efx_filter_spec *);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
                                enum efx_filter_priority priority);
+extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
+                                   enum efx_filter_priority priority);
+extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+                                enum efx_filter_priority priority,
+                                u32 *buf, u32 size);
 #ifdef CONFIG_RFS_ACCEL
 extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                          u16 rxq_index, u32 flow_id);
index f3cd96dfa398f01202b7af6d3c63d6f86a5e3c95..29b2ebfef19f21d85bb61fca9abfe086b88774b0 100644 (file)
@@ -818,9 +818,58 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
        return efx_reset(efx, rc);
 }
 
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+                                     struct ethtool_rx_flow_spec *rule)
+{
+       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+       struct efx_filter_spec spec;
+       u16 vid;
+       u8 proto;
+       int rc;
+
+       rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+                                       rule->location, &spec);
+       if (rc)
+               return rc;
+
+       if (spec.dmaq_id == 0xfff)
+               rule->ring_cookie = RX_CLS_FLOW_DISC;
+       else
+               rule->ring_cookie = spec.dmaq_id;
+
+       rc = efx_filter_get_eth_local(&spec, &vid,
+                                     rule->h_u.ether_spec.h_dest);
+       if (rc == 0) {
+               rule->flow_type = ETHER_FLOW;
+               memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+               if (vid != EFX_FILTER_VID_UNSPEC) {
+                       rule->flow_type |= FLOW_EXT;
+                       rule->h_ext.vlan_tci = htons(vid);
+                       rule->m_ext.vlan_tci = htons(0xfff);
+               }
+               return 0;
+       }
+
+       rc = efx_filter_get_ipv4_local(&spec, &proto,
+                                      &ip_entry->ip4dst, &ip_entry->pdst);
+       if (rc != 0) {
+               rc = efx_filter_get_ipv4_full(
+                       &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
+                       &ip_entry->ip4dst, &ip_entry->pdst);
+               EFX_WARN_ON_PARANOID(rc);
+               ip_mask->ip4src = ~0;
+               ip_mask->psrc = ~0;
+       }
+       rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
+       ip_mask->ip4dst = ~0;
+       ip_mask->pdst = ~0;
+       return rc;
+}
+
 static int
 efx_ethtool_get_rxnfc(struct net_device *net_dev,
-                     struct ethtool_rxnfc *info, u32 *rules __always_unused)
+                     struct ethtool_rxnfc *info, u32 *rule_locs)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -862,42 +911,80 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
                return 0;
        }
 
+       case ETHTOOL_GRXCLSRLCNT:
+               info->data = efx_filter_get_rx_id_limit(efx);
+               if (info->data == 0)
+                       return -EOPNOTSUPP;
+               info->data |= RX_CLS_LOC_SPECIAL;
+               info->rule_cnt =
+                       efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+               return 0;
+
+       case ETHTOOL_GRXCLSRULE:
+               if (efx_filter_get_rx_id_limit(efx) == 0)
+                       return -EOPNOTSUPP;
+               return efx_ethtool_get_class_rule(efx, &info->fs);
+
+       case ETHTOOL_GRXCLSRLALL: {
+               s32 rc;
+               info->data = efx_filter_get_rx_id_limit(efx);
+               if (info->data == 0)
+                       return -EOPNOTSUPP;
+               rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+                                          rule_locs, info->rule_cnt);
+               if (rc < 0)
+                       return rc;
+               info->rule_cnt = rc;
+               return 0;
+       }
+
        default:
                return -EOPNOTSUPP;
        }
 }
 
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
-                                    struct ethtool_rx_ntuple *ntuple)
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+                                     struct ethtool_rx_flow_spec *rule)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
-       struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
-       struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
-       struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
-       struct efx_filter_spec filter;
+       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+       struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+       struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+       struct efx_filter_spec spec;
        int rc;
 
-       /* Range-check action */
-       if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
-           ntuple->fs.action >= (s32)efx->n_rx_channels)
+       /* Check that user wants us to choose the location */
+       if (rule->location != RX_CLS_LOC_ANY &&
+           rule->location != RX_CLS_LOC_FIRST &&
+           rule->location != RX_CLS_LOC_LAST)
+               return -EINVAL;
+
+       /* Range-check ring_cookie */
+       if (rule->ring_cookie >= efx->n_rx_channels &&
+           rule->ring_cookie != RX_CLS_FLOW_DISC)
                return -EINVAL;
 
-       if (~ntuple->fs.data_mask)
+       /* Check for unsupported extensions */
+       if ((rule->flow_type & FLOW_EXT) &&
+           (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+            rule->m_ext.data[1]))
                return -EINVAL;
 
-       efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
-                          (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
-                          0xfff : ntuple->fs.action);
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+                          (rule->location == RX_CLS_LOC_FIRST) ?
+                          EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+                          (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+                          0xfff : rule->ring_cookie);
 
-       switch (ntuple->fs.flow_type) {
+       switch (rule->flow_type) {
        case TCP_V4_FLOW:
        case UDP_V4_FLOW: {
-               u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+               u8 proto = (rule->flow_type == TCP_V4_FLOW ?
                            IPPROTO_TCP : IPPROTO_UDP);
 
                /* Must match all of destination, */
-               if (ip_mask->ip4dst | ip_mask->pdst)
+               if ((__force u32)~ip_mask->ip4dst |
+                   (__force u16)~ip_mask->pdst)
                        return -EINVAL;
                /* all or none of source, */
                if ((ip_mask->ip4src | ip_mask->psrc) &&
@@ -905,17 +992,17 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
                     (__force u16)~ip_mask->psrc))
                        return -EINVAL;
                /* and nothing else */
-               if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+               if (ip_mask->tos | rule->m_ext.vlan_tci)
                        return -EINVAL;
 
-               if (!ip_mask->ip4src)
-                       rc = efx_filter_set_ipv4_full(&filter, proto,
+               if (ip_mask->ip4src)
+                       rc = efx_filter_set_ipv4_full(&spec, proto,
                                                      ip_entry->ip4dst,
                                                      ip_entry->pdst,
                                                      ip_entry->ip4src,
                                                      ip_entry->psrc);
                else
-                       rc = efx_filter_set_ipv4_local(&filter, proto,
+                       rc = efx_filter_set_ipv4_local(&spec, proto,
                                                       ip_entry->ip4dst,
                                                       ip_entry->pdst);
                if (rc)
@@ -923,23 +1010,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
                break;
        }
 
-       case ETHER_FLOW:
-               /* Must match all of destination, */
-               if (!is_zero_ether_addr(mac_mask->h_dest))
+       case ETHER_FLOW | FLOW_EXT:
+               /* Must match all or none of VID */
+               if (rule->m_ext.vlan_tci != htons(0xfff) &&
+                   rule->m_ext.vlan_tci != 0)
                        return -EINVAL;
-               /* all or none of VID, */
-               if (ntuple->fs.vlan_tag_mask != 0xf000 &&
-                   ntuple->fs.vlan_tag_mask != 0xffff)
+       case ETHER_FLOW:
+               /* Must match all of destination */
+               if (!is_broadcast_ether_addr(mac_mask->h_dest))
                        return -EINVAL;
                /* and nothing else */
-               if (!is_broadcast_ether_addr(mac_mask->h_source) ||
-                   mac_mask->h_proto != htons(0xffff))
+               if (!is_zero_ether_addr(mac_mask->h_source) ||
+                   mac_mask->h_proto)
                        return -EINVAL;
 
                rc = efx_filter_set_eth_local(
-                       &filter,
-                       (ntuple->fs.vlan_tag_mask == 0xf000) ?
-                       ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+                       &spec,
+                       (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
+                       ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
                        mac_entry->h_dest);
                if (rc)
                        return rc;
@@ -949,47 +1037,57 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
                return -EINVAL;
        }
 
-       if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
-               return efx_filter_remove_filter(efx, &filter);
+       rc = efx_filter_insert_filter(efx, &spec, true);
+       if (rc < 0)
+               return rc;
 
-       rc = efx_filter_insert_filter(efx, &filter, true);
-       return rc < 0 ? rc : 0;
+       rule->location = rc;
+       return 0;
 }
 
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
-                                     struct ethtool_rxfh_indir *indir)
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+                                struct ethtool_rxnfc *info)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       size_t copy_size =
-               min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
 
-       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+       if (efx_filter_get_rx_id_limit(efx) == 0)
                return -EOPNOTSUPP;
 
-       indir->size = ARRAY_SIZE(efx->rx_indir_table);
-       memcpy(indir->ring_index, efx->rx_indir_table,
-              copy_size * sizeof(indir->ring_index[0]));
-       return 0;
+       switch (info->cmd) {
+       case ETHTOOL_SRXCLSRLINS:
+               return efx_ethtool_set_class_rule(efx, &info->fs);
+
+       case ETHTOOL_SRXCLSRLDEL:
+               return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+                                                info->fs.location);
+
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
-                                     const struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       size_t i;
 
-       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-               return -EOPNOTSUPP;
+       return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+               0 : ARRAY_SIZE(efx->rx_indir_table));
+}
 
-       /* Validate size and indices */
-       if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
-               return -EINVAL;
-       for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-               if (indir->ring_index[i] >= efx->n_rx_channels)
-                       return -EINVAL;
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+       return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+                                     const u32 *indir)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
 
-       memcpy(efx->rx_indir_table, indir->ring_index,
-              sizeof(efx->rx_indir_table));
+       memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
        efx_nic_push_rx_indir_table(efx);
        return 0;
 }
@@ -1019,7 +1117,8 @@ const struct ethtool_ops efx_ethtool_ops = {
        .set_wol                = efx_ethtool_set_wol,
        .reset                  = efx_ethtool_reset,
        .get_rxnfc              = efx_ethtool_get_rxnfc,
-       .set_rx_ntuple          = efx_ethtool_set_rx_ntuple,
+       .set_rxnfc              = efx_ethtool_set_rxnfc,
+       .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
 };
index 97b606b92e881eb6aeda796ec7286834f3422041..8ae1ebd3539796870baaab658c07ea45e0eccac3 100644 (file)
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
        if (!nic_data->stats_pending)
                return;
 
-       nic_data->stats_pending = 0;
+       nic_data->stats_pending = false;
        if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
                rmb(); /* read the done flag before the stats */
                efx->mac_op->update_stats(efx);
index 2b9636f96e05769a5bba61ded58acf2b98a115ca..1fbbbee7b1ae225bdd54433ec04e354f5ff1182f 100644 (file)
@@ -155,6 +155,16 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
        spec->data[2] = ntohl(host2);
 }
 
+static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
+                                        __be32 *host1, __be16 *port1,
+                                        __be32 *host2, __be16 *port2)
+{
+       *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+       *port1 = htons(spec->data[0]);
+       *host2 = htonl(spec->data[2]);
+       *port2 = htons(spec->data[1] >> 16);
+}
+
 /**
  * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
  * @spec: Specification to initialise
@@ -205,6 +215,26 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
        return 0;
 }
 
+int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+                             u8 *proto, __be32 *host, __be16 *port)
+{
+       __be32 host1;
+       __be16 port1;
+
+       switch (spec->type) {
+       case EFX_FILTER_TCP_WILD:
+               *proto = IPPROTO_TCP;
+               __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
+               return 0;
+       case EFX_FILTER_UDP_WILD:
+               *proto = IPPROTO_UDP;
+               __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 /**
  * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
  * @spec: Specification to initialise
@@ -242,6 +272,25 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
        return 0;
 }
 
+int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+                            u8 *proto, __be32 *host, __be16 *port,
+                            __be32 *rhost, __be16 *rport)
+{
+       switch (spec->type) {
+       case EFX_FILTER_TCP_FULL:
+               *proto = IPPROTO_TCP;
+               break;
+       case EFX_FILTER_UDP_FULL:
+               *proto = IPPROTO_UDP;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       __efx_filter_get_ipv4(spec, rhost, rport, host, port);
+       return 0;
+}
+
 /**
  * efx_filter_set_eth_local - specify local Ethernet address and optional VID
  * @spec: Specification to initialise
@@ -270,6 +319,29 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
        return 0;
 }
 
+int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+                            u16 *vid, u8 *addr)
+{
+       switch (spec->type) {
+       case EFX_FILTER_MAC_WILD:
+               *vid = EFX_FILTER_VID_UNSPEC;
+               break;
+       case EFX_FILTER_MAC_FULL:
+               *vid = spec->data[0];
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       addr[0] = spec->data[2] >> 8;
+       addr[1] = spec->data[2];
+       addr[2] = spec->data[1] >> 24;
+       addr[3] = spec->data[1] >> 16;
+       addr[4] = spec->data[1] >> 8;
+       addr[5] = spec->data[1];
+       return 0;
+}
+
 /* Build a filter entry and return its n-tuple key. */
 static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 {
@@ -332,7 +404,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
 
 static int efx_filter_search(struct efx_filter_table *table,
                             struct efx_filter_spec *spec, u32 key,
-                            bool for_insert, int *depth_required)
+                            bool for_insert, unsigned int *depth_required)
 {
        unsigned hash, incr, filter_idx, depth, depth_max;
 
@@ -366,12 +438,59 @@ static int efx_filter_search(struct efx_filter_table *table,
        }
 }
 
-/* Construct/deconstruct external filter IDs */
+/*
+ * Construct/deconstruct external filter IDs.  These must be ordered
+ * by matching priority, for RX NFC semantics.
+ *
+ * Each RX MAC filter entry has a flag for whether it can override an
+ * RX IP filter that also matches.  So we assign locations for MAC
+ * filters with overriding behaviour, then for IP filters, then for
+ * MAC filters without overriding behaviour.
+ */
+
+#define EFX_FILTER_INDEX_WIDTH 13
+#define EFX_FILTER_INDEX_MASK  ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
+                                    unsigned int index, u8 flags)
+{
+       return (table_id == EFX_FILTER_TABLE_RX_MAC &&
+               flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
+               index :
+               (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
+{
+       return (id <= EFX_FILTER_INDEX_MASK) ?
+               EFX_FILTER_TABLE_RX_MAC :
+               (id >> EFX_FILTER_INDEX_WIDTH) - 1;
+}
+
+static inline unsigned int efx_filter_id_index(u32 id)
+{
+       return id & EFX_FILTER_INDEX_MASK;
+}
 
-static inline int
-efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+static inline u8 efx_filter_id_flags(u32 id)
 {
-       return table_id << 16 | index;
+       return (id <= EFX_FILTER_INDEX_MASK) ?
+               EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
+               EFX_FILTER_FLAG_RX;
+}
+
+u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+       struct efx_filter_state *state = efx->filter_state;
+
+       if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
+               return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
+                       + state->table[EFX_FILTER_TABLE_RX_MAC].size;
+       else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
+               return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
+                       + state->table[EFX_FILTER_TABLE_RX_IP].size;
+       else
+               return 0;
 }
 
 /**
@@ -384,14 +503,14 @@ efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
  * On success, return the filter ID.
  * On failure, return a negative error code.
  */
-int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
                             bool replace)
 {
        struct efx_filter_state *state = efx->filter_state;
        struct efx_filter_table *table = efx_filter_spec_table(state, spec);
        struct efx_filter_spec *saved_spec;
        efx_oword_t filter;
-       int filter_idx, depth;
+       unsigned int filter_idx, depth;
        u32 key;
        int rc;
 
@@ -439,7 +558,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
        netif_vdbg(efx, hw, efx->net_dev,
                   "%s: filter type %d index %d rxq %u set",
                   __func__, spec->type, filter_idx, spec->dmaq_id);
-       rc = efx_filter_make_id(table->id, filter_idx);
+       rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
 
 out:
        spin_unlock_bh(&state->lock);
@@ -448,7 +567,7 @@ out:
 
 static void efx_filter_table_clear_entry(struct efx_nic *efx,
                                         struct efx_filter_table *table,
-                                        int filter_idx)
+                                        unsigned int filter_idx)
 {
        static efx_oword_t filter;
 
@@ -463,48 +582,101 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
 }
 
 /**
- * efx_filter_remove_filter - remove a filter by specification
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
  * @efx: NIC from which to remove the filter
- * @spec: Specification for the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
  *
- * On success, return zero.
- * On failure, return a negative error code.
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
  */
-int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+int efx_filter_remove_id_safe(struct efx_nic *efx,
+                             enum efx_filter_priority priority,
+                             u32 filter_id)
 {
        struct efx_filter_state *state = efx->filter_state;
-       struct efx_filter_table *table = efx_filter_spec_table(state, spec);
-       struct efx_filter_spec *saved_spec;
-       efx_oword_t filter;
-       int filter_idx, depth;
-       u32 key;
+       enum efx_filter_table_id table_id;
+       struct efx_filter_table *table;
+       unsigned int filter_idx;
+       struct efx_filter_spec *spec;
+       u8 filter_flags;
        int rc;
 
-       if (!table)
-               return -EINVAL;
+       table_id = efx_filter_id_table_id(filter_id);
+       if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+               return -ENOENT;
+       table = &state->table[table_id];
 
-       key = efx_filter_build(&filter, spec);
+       filter_idx = efx_filter_id_index(filter_id);
+       if (filter_idx >= table->size)
+               return -ENOENT;
+       spec = &table->spec[filter_idx];
 
-       spin_lock_bh(&state->lock);
+       filter_flags = efx_filter_id_flags(filter_id);
 
-       rc = efx_filter_search(table, spec, key, false, &depth);
-       if (rc < 0)
-               goto out;
-       filter_idx = rc;
-       saved_spec = &table->spec[filter_idx];
+       spin_lock_bh(&state->lock);
 
-       if (spec->priority < saved_spec->priority) {
-               rc = -EPERM;
-               goto out;
+       if (test_bit(filter_idx, table->used_bitmap) &&
+           spec->priority == priority && spec->flags == filter_flags) {
+               efx_filter_table_clear_entry(efx, table, filter_idx);
+               if (table->used == 0)
+                       efx_filter_table_reset_search_depth(table);
+               rc = 0;
+       } else {
+               rc = -ENOENT;
        }
 
-       efx_filter_table_clear_entry(efx, table, filter_idx);
-       if (table->used == 0)
-               efx_filter_table_reset_search_depth(table);
-       rc = 0;
+       spin_unlock_bh(&state->lock);
+
+       return rc;
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+int efx_filter_get_filter_safe(struct efx_nic *efx,
+                              enum efx_filter_priority priority,
+                              u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id;
+       struct efx_filter_table *table;
+       struct efx_filter_spec *spec;
+       unsigned int filter_idx;
+       u8 filter_flags;
+       int rc;
+
+       table_id = efx_filter_id_table_id(filter_id);
+       if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+               return -ENOENT;
+       table = &state->table[table_id];
+
+       filter_idx = efx_filter_id_index(filter_id);
+       if (filter_idx >= table->size)
+               return -ENOENT;
+       spec = &table->spec[filter_idx];
+
+       filter_flags = efx_filter_id_flags(filter_id);
+
+       spin_lock_bh(&state->lock);
+
+       if (test_bit(filter_idx, table->used_bitmap) &&
+           spec->priority == priority && spec->flags == filter_flags) {
+               *spec_buf = *spec;
+               rc = 0;
+       } else {
+               rc = -ENOENT;
+       }
 
-out:
        spin_unlock_bh(&state->lock);
+
        return rc;
 }
 
@@ -514,7 +686,7 @@ static void efx_filter_table_clear(struct efx_nic *efx,
 {
        struct efx_filter_state *state = efx->filter_state;
        struct efx_filter_table *table = &state->table[table_id];
-       int filter_idx;
+       unsigned int filter_idx;
 
        spin_lock_bh(&state->lock);
 
@@ -538,6 +710,68 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
        efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
 }
 
+u32 efx_filter_count_rx_used(struct efx_nic *efx,
+                            enum efx_filter_priority priority)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id;
+       struct efx_filter_table *table;
+       unsigned int filter_idx;
+       u32 count = 0;
+
+       spin_lock_bh(&state->lock);
+
+       for (table_id = EFX_FILTER_TABLE_RX_IP;
+            table_id <= EFX_FILTER_TABLE_RX_MAC;
+            table_id++) {
+               table = &state->table[table_id];
+               for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+                       if (test_bit(filter_idx, table->used_bitmap) &&
+                           table->spec[filter_idx].priority == priority)
+                               ++count;
+               }
+       }
+
+       spin_unlock_bh(&state->lock);
+
+       return count;
+}
+
+s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+                         enum efx_filter_priority priority,
+                         u32 *buf, u32 size)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id;
+       struct efx_filter_table *table;
+       unsigned int filter_idx;
+       s32 count = 0;
+
+       spin_lock_bh(&state->lock);
+
+       for (table_id = EFX_FILTER_TABLE_RX_IP;
+            table_id <= EFX_FILTER_TABLE_RX_MAC;
+            table_id++) {
+               table = &state->table[table_id];
+               for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+                       if (test_bit(filter_idx, table->used_bitmap) &&
+                           table->spec[filter_idx].priority == priority) {
+                               if (count == size) {
+                                       count = -EMSGSIZE;
+                                       goto out;
+                               }
+                               buf[count++] = efx_filter_make_id(
+                                       table_id, filter_idx,
+                                       table->spec[filter_idx].flags);
+                       }
+               }
+       }
+out:
+       spin_unlock_bh(&state->lock);
+
+       return count;
+}
+
 /* Restore filter stater after reset */
 void efx_restore_filters(struct efx_nic *efx)
 {
@@ -545,7 +779,7 @@ void efx_restore_filters(struct efx_nic *efx)
        enum efx_filter_table_id table_id;
        struct efx_filter_table *table;
        efx_oword_t filter;
-       int filter_idx;
+       unsigned int filter_idx;
 
        spin_lock_bh(&state->lock);
 
index 872f2132a49626c2ff3a5ac5c99e32ae22e121b6..3d4108cd90caceac01834d6634a9566a9fcf0c33 100644 (file)
@@ -78,6 +78,11 @@ enum efx_filter_flags {
  *
  * Use the efx_filter_set_*() functions to initialise the @type and
  * @data fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one.  The hardware priority of a filter
+ * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
+ * flag.
  */
 struct efx_filter_spec {
        u8      type:4;
@@ -100,11 +105,18 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
 
 extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
                                     __be32 host, __be16 port);
+extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+                                    u8 *proto, __be32 *host, __be16 *port);
 extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
                                    __be32 host, __be16 port,
                                    __be32 rhost, __be16 rport);
+extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+                                   u8 *proto, __be32 *host, __be16 *port,
+                                   __be32 *rhost, __be16 *rport);
 extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
                                    u16 vid, const u8 *addr);
+extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+                                   u16 *vid, u8 *addr);
 enum {
        EFX_FILTER_VID_UNSPEC = 0xffff,
 };
index b6304486f2449437b5825169e06b77ad70ae6a97..bc9dcd6b30d7f6e290ab4236998e00a06af37e41 100644 (file)
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
-               part->mcdi.updating = 1;
+               part->mcdi.updating = true;
        }
 
        /* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
-               part->mcdi.updating = 1;
+               part->mcdi.updating = true;
        }
 
        while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
        int rc = 0;
 
        if (part->mcdi.updating) {
-               part->mcdi.updating = 0;
+               part->mcdi.updating = false;
                rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
        }
 
index b8e251a1ee48694063693698d83a25527b3c1f7c..c49502bab6a3a06803ce9dc9950220c9bd371355 100644 (file)
@@ -908,7 +908,7 @@ struct efx_nic_type {
        unsigned int phys_addr_channels;
        unsigned int tx_dc_base;
        unsigned int rx_dc_base;
-       u32 offload_features;
+       netdev_features_t offload_features;
 };
 
 /**************************************************************************
index 752d521c09b1a15400ada2d8ce6fce786f1c42c9..aca349861767fb793b5ba2251f88e87a743fe70c 100644 (file)
@@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);
 
-               skb_frag_set_page(skb, 0, page);
-               skb_shinfo(skb)->frags[0].page_offset =
-                       efx_rx_buf_offset(efx, rx_buf);
-               skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
-               skb_shinfo(skb)->nr_frags = 1;
+               skb_fill_page_desc(skb, 0, page,
+                                  efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
 
                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
@@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
                  rx_queue->ptr_mask);
 
        /* Allocate RX buffers */
-       rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+       rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;
index 822f6c2a6a7c1dee37d4809508ae23e015e9129a..52edd24fcde388cfc690e6ca1fe27bae0f20e32a 100644 (file)
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
                /* Determine how many packets to send */
                state->packet_count = efx->txq_entries / 3;
                state->packet_count = min(1 << (i << 2), state->packet_count);
-               state->skbs = kzalloc(sizeof(state->skbs[0]) *
-                                     state->packet_count, GFP_KERNEL);
+               state->skbs = kcalloc(state->packet_count,
+                                     sizeof(state->skbs[0]), GFP_KERNEL);
                if (!state->skbs)
                        return -ENOMEM;
                state->flush = false;
index cc2549cb70765e7483a20e3f6998e7abd92d820c..4d5d619feaa602842ef36a14911adb58cbffc23c 100644 (file)
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
 static int siena_probe_nic(struct efx_nic *efx)
 {
        struct siena_nic_data *nic_data;
-       bool already_attached = 0;
+       bool already_attached = false;
        efx_oword_t reg;
        int rc;
 
index df88c5430f956c6a2752a5dcdd40bcc3fe4066f1..72f0fbc73b1abb08cd514e012d41d0bce5e8d675 100644 (file)
@@ -31,7 +31,9 @@
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                              struct efx_tx_buffer *buffer)
+                              struct efx_tx_buffer *buffer,
+                              unsigned int *pkts_compl,
+                              unsigned int *bytes_compl)
 {
        if (buffer->unmap_len) {
                struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        }
 
        if (buffer->skb) {
+               (*pkts_compl)++;
+               (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        buffer->skb = skb;
        buffer->continuation = false;
 
+       netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
+               unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->len = 0;
        }
 
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  * specified index.
  */
 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                               unsigned int index)
+                               unsigned int index,
+                               unsigned int *pkts_compl,
+                               unsigned int *bytes_compl)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                        return;
                }
 
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                buffer->continuation = true;
                buffer->len = 0;
 
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
-       efx_dequeue_buffers(tx_queue, index);
+       efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+       netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
        /* See if we need to restart the netif queue.  This barrier
         * separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
        /* Allocate software ring */
-       tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+       tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
+               unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->continuation = true;
                buffer->len = 0;
 
                ++tx_queue->read_count;
        }
+       netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                        goto mem_err;
        }
 
+       netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
index 60135aa558025bc9f86b434aa5e8b330476dec82..53efe7c7b1c066e7ce749bcbdfe53a773d262874 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/tcp.h>         /* struct tcphdr */
 #include <linux/skbuff.h>
 #include <linux/mii.h>         /* MII definitions */
+#include <linux/crc32.h>
 
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
@@ -57,13 +58,20 @@ static const char *meth_str="SGI O2 Fast Ethernet";
 static int timeout = TX_TIMEOUT;
 module_param(timeout, int, 0);
 
+/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
 /*
  * This structure is private to each device. It is used to pass
  * packets in and out, so there is place for a packet
  */
 struct meth_private {
        /* in-memory copy of MAC Control register */
-       unsigned long mac_ctrl;
+       u64 mac_ctrl;
+
        /* in-memory copy of DMA Control register */
        unsigned long dma_ctrl;
        /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@ struct meth_private {
        struct sk_buff *rx_skbs[RX_RING_ENTRIES];
        unsigned long rx_write;
 
+       /* Multicast filter. */
+       u64 mcast_filter;
+
        spinlock_t meth_lock;
 };
 
@@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        }
 }
 
+static void meth_set_rx_mode(struct net_device *dev)
+{
+       struct meth_private *priv = netdev_priv(dev);
+       unsigned long flags;
+
+       netif_stop_queue(dev);
+       spin_lock_irqsave(&priv->meth_lock, flags);
+       priv->mac_ctrl &= ~METH_PROMISC;
+
+       if (dev->flags & IFF_PROMISC) {
+               priv->mac_ctrl |= METH_PROMISC;
+               priv->mcast_filter = 0xffffffffffffffffUL;
+       } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+                  (dev->flags & IFF_ALLMULTI)) {
+               priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+               priv->mcast_filter = 0xffffffffffffffffUL;
+       } else {
+               struct netdev_hw_addr *ha;
+               priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
+               netdev_for_each_mc_addr(ha, dev)
+                       set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+                               (volatile unsigned long *)&priv->mcast_filter);
+       }
+
+       /* Write the changes to the chip registers. */
+       mace->eth.mac_ctrl = priv->mac_ctrl;
+       mace->eth.mcast_filter = priv->mcast_filter;
+
+       /* Done! */
+       spin_unlock_irqrestore(&priv->meth_lock, flags);
+       netif_wake_queue(dev);
+}
+
 static const struct net_device_ops meth_netdev_ops = {
        .ndo_open               = meth_open,
        .ndo_stop               = meth_release,
@@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = {
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_rx_mode        = meth_set_rx_mode,
 };
 
 /*
@@ -830,24 +876,7 @@ static struct platform_driver meth_driver = {
        }
 };
 
-static int __init meth_init_module(void)
-{
-       int err;
-
-       err = platform_driver_register(&meth_driver);
-       if (err)
-               printk(KERN_ERR "Driver registration failed\n");
-
-       return err;
-}
-
-static void __exit meth_exit_module(void)
-{
-       platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
 
 MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
 MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
index 1b4658c99391a4edbc09670f0145ec6d8bcbe31f..5b118cd5bf942c48e5d6be5ecf3e8f5b9c4c9559 100644 (file)
@@ -47,8 +47,6 @@
 #define sis190_rx_skb                  netif_rx
 #define sis190_rx_quota(count, quota)  count
 
-#define MAC_ADDR_LEN           6
-
 #define NUM_TX_DESC            64      /* [8..1024] */
 #define NUM_RX_DESC            64      /* [8..8192] */
 #define TX_RING_BYTES          (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
        }
 
        /* Get MAC address from EEPROM */
-       for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+       for (i = 0; i < ETH_ALEN / 2; i++) {
                u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
 
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
        udelay(50);
        pci_read_config_byte(isa_bridge, 0x48, &reg);
 
-        for (i = 0; i < MAC_ADDR_LEN; i++) {
+        for (i = 0; i < ETH_ALEN; i++) {
                 outb(0x9 + i, 0x78);
                 dev->dev_addr[i] = inb(0x79);
         }
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
         */
        SIS_W16(RxMacControl, ctl & ~0x0f00);
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
 
        SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
 {
        struct sis190_private *tp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev),
+               sizeof(info->bus_info));
 }
 
 static int sis190_get_regs_len(struct net_device *dev)
index a184abc5ef11802afcefc86300d3fda2be7b763f..c8efc708c792a63d47bb608d1ff16659613bd0c5 100644 (file)
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
 
-       strcpy (info->driver, SIS900_MODULE_NAME);
-       strcpy (info->version, SIS900_DRV_VERSION);
-       strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+       strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+               sizeof(info->bus_info));
 }
 
 static u32 sis900_get_msglevel(struct net_device *net_dev)
index 0a5dfb814157f66343af36f4724718aa3503d4c0..2c077ce0b6d6416d56f200ba1b62a080ce1ac11e 100644 (file)
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
 {
        struct epic_private *np = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index 8f61fe9db1d0b6f10d86067c56ceac2788f94d87..313ba3b32ab476be80d43f4b4773271f1b17760e 100644 (file)
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
        },
 };
 
-static int __init smc911x_init(void)
-{
-       return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
-       platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
index cbfa981871314a4e5605117b88b275df9eb215ff..ada927aba7a517f64804b06852e33f1f4c2ede39 100644 (file)
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
 
 static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
index f47f81e25322942d18abd0c929cb17ca2730a7db..64ad3ed74495f171e29ff5d4e88c1d1efeab8a51 100644 (file)
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
        },
 };
 
-static int __init smc_init(void)
-{
-       return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
-       platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
index d2be42aafbef201e48ea5d1fb177564aaeaacf31..9d0b8ced0234b5ae7468abcf732af6b080618d8d 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
                                unsigned int *buf, unsigned int wordcount);
 };
 
+#define SMSC911X_NUM_SUPPLIES 2
+
 struct smsc911x_data {
        void __iomem *ioaddr;
 
@@ -138,6 +141,9 @@ struct smsc911x_data {
 
        /* register access functions */
        const struct smsc911x_ops *ops;
+
+       /* regulators */
+       struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
 };
 
 /* Easy access to information */
@@ -362,6 +368,76 @@ out:
        spin_unlock_irqrestore(&pdata->dev_lock, flags);
 }
 
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       if (ret)
+               netdev_err(ndev, "failed to enable regulators %d\n",
+                               ret);
+       return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a, in designs where
+ * these are not always-on we need to request regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       /* Request regulators */
+       pdata->supplies[0].supply = "vdd33a";
+       pdata->supplies[1].supply = "vddvario";
+       ret = regulator_bulk_get(&pdev->dev,
+                       ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       if (ret)
+               netdev_err(ndev, "couldn't get regulators %d\n",
+                               ret);
+       return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ *
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+
+       /* Free regulators */
+       regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+}
+
 /* waits for MAC not busy, with timeout.  Only called by smsc911x_mac_read
  * and smsc911x_mac_write, so assumes mac_lock is held */
 static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -1243,10 +1319,92 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
        spin_unlock(&pdata->mac_lock);
 }
 
+static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
+{
+       int rc = 0;
+
+       if (!pdata->phy_dev)
+               return rc;
+
+       rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+       if (rc < 0) {
+               SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+               return rc;
+       }
+
+       /*
+        * If energy is detected the PHY is already awake so is not necessary
+        * to disable the energy detect power-down mode.
+        */
+       if ((rc & MII_LAN83C185_EDPWRDOWN) &&
+           !(rc & MII_LAN83C185_ENERGYON)) {
+               /* Disable energy detect mode for this SMSC Transceivers */
+               rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+                              rc & (~MII_LAN83C185_EDPWRDOWN));
+
+               if (rc < 0) {
+                       SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+                       return rc;
+               }
+
+               mdelay(1);
+       }
+
+       return 0;
+}
+
+static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
+{
+       int rc = 0;
+
+       if (!pdata->phy_dev)
+               return rc;
+
+       rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+       if (rc < 0) {
+               SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+               return rc;
+       }
+
+       /* Only enable if energy detect mode is already disabled */
+       if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
+               mdelay(100);
+               /* Enable energy detect mode for this SMSC Transceivers */
+               rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+                              rc | MII_LAN83C185_EDPWRDOWN);
+
+               if (rc < 0) {
+                       SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+                       return rc;
+               }
+
+               mdelay(1);
+       }
+       return 0;
+}
+
 static int smsc911x_soft_reset(struct smsc911x_data *pdata)
 {
        unsigned int timeout;
        unsigned int temp;
+       int ret;
+
+       /*
+        * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
+        * are initialized in a Energy Detect Power-Down mode that prevents
+        * the MAC chip to be software reseted. So we have to wakeup the PHY
+        * before.
+        */
+       if (pdata->generation == 4) {
+               ret = smsc911x_phy_disable_energy_detect(pdata);
+
+               if (ret) {
+                       SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+                       return ret;
+               }
+       }
 
        /* Reset the LAN911x */
        smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
@@ -1260,6 +1418,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
                SMSC_WARN(pdata, drv, "Failed to complete reset");
                return -EIO;
        }
+
+       if (pdata->generation == 4) {
+               ret = smsc911x_phy_enable_energy_detect(pdata);
+
+               if (ret) {
+                       SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+                       return ret;
+               }
+       }
+
        return 0;
 }
 
@@ -1937,6 +2105,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
        unsigned int byte_test;
+       unsigned int to = 100;
 
        SMSC_TRACE(pdata, probe, "Driver Parameters:");
        SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
@@ -1952,6 +2121,17 @@ static int __devinit smsc911x_init(struct net_device *dev)
                return -ENODEV;
        }
 
+       /*
+        * poll the READY bit in PMT_CTRL. Any other access to the device is
+        * forbidden while this bit isn't set. Try for 100ms
+        */
+       while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+               udelay(1000);
+       if (to == 0) {
+               pr_err("Device not READY in 100ms aborting\n");
+               return -ENODEV;
+       }
+
        /* Check byte ordering */
        byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
        SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
@@ -2080,6 +2260,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
 
        iounmap(pdata->ioaddr);
 
+       (void)smsc911x_disable_resources(pdev);
+       smsc911x_free_resources(pdev);
+
        free_netdev(dev);
 
        return 0;
@@ -2206,10 +2389,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        pdata->dev = dev;
        pdata->msg_enable = ((1 << debug) - 1);
 
+       platform_set_drvdata(pdev, dev);
+
+       retval = smsc911x_request_resources(pdev);
+       if (retval)
+               goto out_return_resources;
+
+       retval = smsc911x_enable_resources(pdev);
+       if (retval)
+               goto out_disable_resources;
+
        if (pdata->ioaddr == NULL) {
                SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
                retval = -ENOMEM;
-               goto out_free_netdev_2;
+               goto out_disable_resources;
        }
 
        retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2221,7 +2414,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        if (retval) {
                SMSC_WARN(pdata, probe, "Error smsc911x config not found");
-               goto out_unmap_io_3;
+               goto out_disable_resources;
        }
 
        /* assume standard, non-shifted, access to HW registers */
@@ -2232,7 +2425,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        retval = smsc911x_init(dev);
        if (retval < 0)
-               goto out_unmap_io_3;
+               goto out_disable_resources;
 
        /* configure irq polarity and type before connecting isr */
        if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2252,15 +2445,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        if (retval) {
                SMSC_WARN(pdata, probe,
                          "Unable to claim requested irq: %d", dev->irq);
-               goto out_unmap_io_3;
+               goto out_free_irq;
        }
 
-       platform_set_drvdata(pdev, dev);
-
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_unset_drvdata_4;
+               goto out_free_irq;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
@@ -2309,12 +2500,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
 out_unregister_netdev_5:
        unregister_netdev(dev);
-out_unset_drvdata_4:
-       platform_set_drvdata(pdev, NULL);
+out_free_irq:
        free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+       (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+       smsc911x_free_resources(pdev);
+       platform_set_drvdata(pdev, NULL);
        iounmap(pdata->ioaddr);
-out_free_netdev_2:
        free_netdev(dev);
 out_release_io_1:
        release_mem_region(res->start, resource_size(res));
index 8d67aacf886722c7590f1dbfd674aaf5a5975793..938ecf290813d1140d3c15bdb2b9b8cf8ffffd52 100644 (file)
 #include <asm/smsc911x.h>
 #endif
 
+#ifdef CONFIG_SMSC_PHY
+#include <linux/smscphy.h>
+#endif
+
 #endif                         /* __SMSC911X_H__ */
index edb24b0e337be8d27af132519d771e1f58aa36ac..a9efbdfe5302df8cc22829b37ed53cef6aa653b8 100644 (file)
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
 {
        struct smsc9420_pdata *pd = netdev_priv(netdev);
 
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->bus_info, pci_name(pd->pdev));
-       strcpy(drvinfo->version, DRV_VERSION);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+               sizeof(drvinfo->bus_info));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
 }
 
 static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
index 22745d7bf530d3e4865c869a671f981d4758019d..036428348faa3e5b58cd261b9f0ec45e0ad3e4fc 100644 (file)
@@ -12,11 +12,36 @@ config STMMAC_ETH
 
 if STMMAC_ETH
 
+config STMMAC_PLATFORM
+       tristate "STMMAC platform bus support"
+       depends on STMMAC_ETH
+       default y
+       ---help---
+         This selects the platform specific bus support for
+         the stmmac device driver. This is the driver used
+         on many embedded STM platforms based on ARM and SuperH
+         processors.
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
+
+config STMMAC_PCI
+       tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+       depends on STMMAC_ETH && PCI && EXPERIMENTAL
+       ---help---
+         This is to select the Synopsys DWMAC available on PCI devices,
+         if you have a controller with this interface, say Y or M here.
+
+         This PCI support is tested on XLINX XC2V3000 FF1152AMT0221
+         D1215994A VIRTEX FPGA board.
+
+         If unsure, say N.
+
 config STMMAC_DEBUG_FS
        bool "Enable monitoring via sysFS "
        default n
        depends on STMMAC_ETH && DEBUG_FS
-       -- help
+       ---help---
          The stmmac entry in /sys reports DMA TX/RX rings
          or (if supported) the HW cap register.
 
index d7c45164ea798117a85b004b6d0e38a90f3bc5fc..bc965ac9e0250e61f3d4a904989b4d6288abb86f 100644 (file)
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
 stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
 stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
 stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o     \
              dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o     \
              dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
index 2cc119295821500b465ddc633516155a69ad0982..d0b814ef0675396d56a1c5771f87ebe37689c09e 100644 (file)
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define STMMAC_VLAN_TAG_USED
 #include <linux/if_vlan.h>
@@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
                                unsigned int high, unsigned int low);
 extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                                unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
index da66ac511c4c60df8dabe849cd02ba9ae2d76dad..4d5402a1d262976bdede2fed63875000d81e17fb 100644 (file)
@@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
        /* DMA SW reset */
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
-       limit = 15000;
+       limit = 10;
        while (limit--) {
                if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
                        break;
+               mdelay(10);
        }
        if (limit < 0)
                return -EBUSY;
index 627f656b0f3c581bbb20a45084b7c1fd87a22194..bc17fd08b55dc9085a9ea66cefe1f388fd3ce985 100644 (file)
@@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
        /* DMA SW reset */
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
-       limit = 15000;
+       limit = 10;
        while (limit--) {
                if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
                        break;
+               mdelay(10);
        }
        if (limit < 0)
                return -EBUSY;
index e25093510b0cd462af521f45aa5b3f43c98c0213..f20aa12931d00a4a14ca87e6bf2f0493c4d5d536 100644 (file)
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
        writel(data, ioaddr + low);
 }
 
+/* Enable disable MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+       u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+       if (enable)
+               value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+       else
+               value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+       writel(value, ioaddr + MAC_CTRL_REG);
+}
+
 void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                         unsigned int high, unsigned int low)
 {
index 9bafa6cf9e8b5af13b262e1efc28809ff9a9c910..120740020e2ca4c68d304629bf5652dd7913ff0b 100644 (file)
@@ -20,7 +20,8 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION     "Oct_2011"
+#define STMMAC_RESOURCE_NAME   "stmmaceth"
+#define DRV_MODULE_VERSION     "Dec_2011"
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -72,7 +73,6 @@ struct stmmac_priv {
        spinlock_t lock;
        spinlock_t tx_lock;
        int wolopts;
-       int wolenabled;
        int wol_irq;
 #ifdef CONFIG_STMMAC_TIMER
        struct stmmac_timer *tm;
@@ -80,10 +80,21 @@ struct stmmac_priv {
        struct plat_stmmacenet_data *plat;
        struct stmmac_counters mmc;
        struct dma_features dma_cap;
+       int hw_cap_support;
 };
 
+extern int phyaddr;
+
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+                               struct plat_stmmacenet_data *plat_dat);
index e8eff09bbbd73c7b036f5cd9520d030c949fddc1..ed83c4c47b8a273383b489106c1f73a74144988c 100644 (file)
@@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
        struct stmmac_priv *priv = netdev_priv(dev);
 
        if (priv->plat->has_gmac)
-               strcpy(info->driver, GMAC_ETHTOOL_NAME);
+               strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
        else
-               strcpy(info->driver, MAC100_ETHTOOL_NAME);
+               strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+                       sizeof(info->driver));
 
        strcpy(info->version, DRV_MODULE_VERSION);
        info->fw_version[0] = '\0';
@@ -430,6 +431,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 support = WAKE_MAGIC | WAKE_UCAST;
 
+       /* By default almost all GMAC devices support the WoL via
+        * magic frame but we can disable it if the HW capability
+        * register shows no support for pmt_magic_frame. */
+       if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
+               wol->wolopts &= ~WAKE_MAGIC;
+
        if (!device_can_wakeup(priv->device))
                return -EINVAL;
 
index 20546bbbb8db04744d39b2e4a4e5710f04fa6538..3738b47005489ef35afe054e868a7868a5d1c9b5 100644 (file)
        https://bugzilla.stlinux.com/
 *******************************************************************************/
 
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/skbuff.h>
@@ -52,8 +48,6 @@
 #endif
 #include "stmmac.h"
 
-#define STMMAC_RESOURCE_NAME   "stmmaceth"
-
 #undef STMMAC_DEBUG
 /*#define STMMAC_DEBUG*/
 #ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1;                /* -1: default, 0: no output, 16:  all */
 module_param(debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
 
-static int phyaddr = -1;
+int phyaddr = -1;
 module_param(phyaddr, int, S_IRUGO);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
 /**
  * stmmac_verify_args - verify the driver parameters.
  * Description: it verifies if some wrong parameter is passed to the driver.
@@ -321,12 +320,10 @@ static int stmmac_init_phy(struct net_device *dev)
        }
 
        /* Stop Advertising 1000BASE Capability if interface is not GMII */
-       if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) ||
-           (interface == PHY_INTERFACE_MODE_RMII))) {
-               phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
-                                     SUPPORTED_Asym_Pause);
-               phydev->advertising = phydev->supported;
-       }
+       if ((interface == PHY_INTERFACE_MODE_MII) ||
+           (interface == PHY_INTERFACE_MODE_RMII))
+               phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+                                        SUPPORTED_1000baseT_Full);
 
        /*
         * Broken HW is sometimes missing the pull-up resistor on the
@@ -347,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
        return 0;
 }
 
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
-       u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-       value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
-       writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
-       u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-       value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
-       writel(value, ioaddr + MAC_CTRL_REG);
-}
-
 /**
  * display_ring
  * @p: pointer to the ring.
@@ -783,10 +764,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-       /* Do not manage MMC IRQ (FIXME) */
+       /* Mask MMC irq, counters are managed in SW and registers
+        * are cleared on each READ eventually. */
        dwmac_mmc_intr_all_mask(priv->ioaddr);
-       dwmac_mmc_ctrl(priv->ioaddr, mode);
-       memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+       if (priv->dma_cap.rmon) {
+               dwmac_mmc_ctrl(priv->ioaddr, mode);
+               memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+       } else
+               pr_info(" No MAC Management Counters available");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -807,8 +793,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
        return 0;
 }
 
-/* New GMAC chips support a new register to indicate the
- * presence of the optional feature/functions.
+/**
+ * stmmac_selec_desc_mode
+ * @dev : device pointer
+ * Description: select the Enhanced/Alternate or Normal descriptors */
+static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
+{
+       if (priv->plat->enh_desc) {
+               pr_info(" Enhanced/Alternate descriptors\n");
+               priv->hw->desc = &enh_desc_ops;
+       } else {
+               pr_info(" Normal descriptors\n");
+               priv->hw->desc = &ndesc_ops;
+       }
+}
+
+/**
+ * stmmac_get_hw_features
+ * @priv : private device pointer
+ * Description:
+ *  new GMAC chip generations have a new register to indicate the
+ *  presence of the optional feature/functions.
+ *  This can be also used to override the value passed through the
+ *  platform and necessary for old MAC10/100 and GMAC chips.
  */
 static int stmmac_get_hw_features(struct stmmac_priv *priv)
 {
@@ -829,7 +836,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
                        (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
                priv->dma_cap.pmt_magic_frame =
                        (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
-               /*MMC*/
+               /* MMC */
                priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
                /* IEEE 1588-2002*/
                priv->dma_cap.time_stamp =
@@ -857,12 +864,58 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
                priv->dma_cap.enh_desc =
                        (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
 
-       } else
-               pr_debug("\tNo HW DMA feature register supported");
+       }
 
        return hw_cap;
 }
 
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this is to attach the GMAC or MAC 10/100
+ * main core structures that will be completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       struct mac_device_info *device;
+
+       if (priv->plat->has_gmac)
+               device = dwmac1000_setup(priv->ioaddr);
+       else
+               device = dwmac100_setup(priv->ioaddr);
+
+       if (!device)
+               return -ENOMEM;
+
+       priv->hw = device;
+       priv->hw->ring = &ring_mode_ops;
+
+       if (device_can_wakeup(priv->device)) {
+               priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+               enable_irq_wake(priv->wol_irq);
+       }
+
+       return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+       /* verify if the MAC address is valid, in case of failures it
+        * generates a random MAC address */
+       if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+               priv->hw->mac->get_umac_addr((void __iomem *)
+                                            priv->dev->base_addr,
+                                            priv->dev->dev_addr, 0);
+               if  (!is_valid_ether_addr(priv->dev->dev_addr))
+                       random_ether_addr(priv->dev->dev_addr);
+       }
+       pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+                                                  priv->dev->dev_addr);
+}
+
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -877,18 +930,28 @@ static int stmmac_open(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
 
-       /* Check that the MAC address is valid.  If its not, refuse
-        * to bring the device up. The user must specify an
-        * address using the following linux command:
-        *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               random_ether_addr(dev->dev_addr);
-               pr_warning("%s: generated random MAC address %pM\n", dev->name,
-                       dev->dev_addr);
-       }
+       /* MAC HW device setup */
+       ret = stmmac_mac_device_setup(dev);
+       if (ret < 0)
+               return ret;
+
+       stmmac_check_ether_addr(priv);
 
        stmmac_verify_args();
 
+       /* Override with kernel parameters if supplied XXX CRS XXX
+        * this needs to have multiple instances */
+       if ((phyaddr >= 0) && (phyaddr <= 31))
+               priv->plat->phy_addr = phyaddr;
+
+       /* MDIO bus Registration */
+       ret = stmmac_mdio_register(dev);
+       if (ret < 0) {
+               pr_debug("%s: MDIO bus (id: %d) registration failed",
+                        __func__, priv->plat->bus_id);
+               return ret;
+       }
+
 #ifdef CONFIG_STMMAC_TIMER
        priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
        if (unlikely(priv->tm == NULL)) {
@@ -913,6 +976,44 @@ static int stmmac_open(struct net_device *dev)
                goto open_error;
        }
 
+       stmmac_get_synopsys_id(priv);
+
+       priv->hw_cap_support = stmmac_get_hw_features(priv);
+
+       if (priv->hw_cap_support) {
+               pr_info(" Support DMA HW capability register");
+
+               /* We can override some gmac/dma configuration fields: e.g.
+                * enh_desc, tx_coe (e.g. that are passed through the
+                * platform) with the values from the HW capability
+                * register (if supported).
+                */
+               priv->plat->enh_desc = priv->dma_cap.enh_desc;
+               priv->plat->tx_coe = priv->dma_cap.tx_coe;
+               priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+               /* By default disable wol on magic frame if not supported */
+               if (!priv->dma_cap.pmt_magic_frame)
+                       priv->wolopts &= ~WAKE_MAGIC;
+
+       } else
+               pr_info(" No HW DMA feature register supported");
+
+       /* Select the enhanced/normal descriptor structures */
+       stmmac_selec_desc_mode(priv);
+
+       /* PMT module is not integrated in all the MAC devices. */
+       if (priv->plat->pmt) {
+               pr_info(" Remote wake-up capable\n");
+               device_set_wakeup_capable(priv->device, 1);
+       }
+
+       priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+       if (priv->rx_coe)
+               pr_info(" Checksum Offload Engine supported\n");
+       if (priv->plat->tx_coe)
+               pr_info(" Checksum insertion supported\n");
+
        /* Create and initialize the TX/RX descriptors chains. */
        priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
        priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -935,15 +1036,6 @@ static int stmmac_open(struct net_device *dev)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
 
-       stmmac_get_synopsys_id(priv);
-
-       stmmac_get_hw_features(priv);
-
-       priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
-       if (priv->rx_coe)
-               pr_info("stmmac: Rx Checksum Offload Engine supported\n");
-       if (priv->plat->tx_coe)
-               pr_info("\tTX Checksum insertion supported\n");
        netdev_update_features(dev);
 
        /* Request the IRQ lines */
@@ -956,7 +1048,7 @@ static int stmmac_open(struct net_device *dev)
        }
 
        /* Enable the MAC Rx/Tx */
-       stmmac_enable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, true);
 
        /* Set the HW DMA mode and the COE */
        stmmac_dma_operation_mode(priv);
@@ -965,9 +1057,13 @@ static int stmmac_open(struct net_device *dev)
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;
 
-       if (priv->dma_cap.rmon)
-               stmmac_mmc_setup(priv);
+       stmmac_mmc_setup(priv);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       ret = stmmac_init_fs(dev);
+       if (ret < 0)
+               pr_warning("\tFailed debugFS registration");
+#endif
        /* Start the ball rolling... */
        DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
        priv->hw->dma->start_tx(priv->ioaddr);
@@ -1040,10 +1136,15 @@ static int stmmac_release(struct net_device *dev)
        free_dma_desc_resources(priv);
 
        /* Disable the MAC Rx/Tx */
-       stmmac_disable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, false);
 
        netif_carrier_off(dev);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       stmmac_exit_fs();
+#endif
+       stmmac_mdio_unregister(dev);
+
        return 0;
 }
 
@@ -1419,7 +1520,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
@@ -1489,9 +1591,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!priv->phydev)
                return -EINVAL;
 
-       spin_lock(&priv->lock);
        ret = phy_mii_ioctl(priv->phydev, rq, cmd);
-       spin_unlock(&priv->lock);
 
        return ret;
 }
@@ -1558,7 +1658,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
        struct net_device *dev = seq->private;
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (!stmmac_get_hw_features(priv)) {
+       if (!priv->hw_cap_support) {
                seq_printf(seq, "DMA HW features not supported\n");
                return 0;
        }
@@ -1689,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = {
 };
 
 /**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
  */
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+                                       struct plat_stmmacenet_data *plat_dat)
 {
        int ret = 0;
-       struct stmmac_priv *priv = netdev_priv(dev);
+       struct net_device *ndev = NULL;
+       struct stmmac_priv *priv;
+
+       ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+       if (!ndev) {
+               pr_err("%s: ERROR: allocating the device\n", __func__);
+               return NULL;
+       }
+
+       SET_NETDEV_DEV(ndev, device);
+
+       priv = netdev_priv(ndev);
+       priv->device = device;
+       priv->dev = ndev;
 
-       ether_setup(dev);
+       ether_setup(ndev);
 
-       dev->netdev_ops = &stmmac_netdev_ops;
-       stmmac_set_ethtool_ops(dev);
+       ndev->netdev_ops = &stmmac_netdev_ops;
+       stmmac_set_ethtool_ops(ndev);
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
-       dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+       ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
-       dev->features |= NETIF_F_HW_VLAN_RX;
+       ndev->features |= NETIF_F_HW_VLAN_RX;
 #endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);
 
@@ -1718,272 +1831,73 @@ static int stmmac_probe(struct net_device *dev)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
 
        priv->pause = pause;
-       netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
-       /* Get the MAC address */
-       priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
-                                    dev->dev_addr, 0);
-
-       if (!is_valid_ether_addr(dev->dev_addr))
-               pr_warning("\tno valid MAC address;"
-                       "please, use ifconfig or nwhwconfig!\n");
+       priv->plat = plat_dat;
+       netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);
 
-       ret = register_netdev(dev);
+       ret = register_netdev(ndev);
        if (ret) {
                pr_err("%s: ERROR %i registering the device\n",
                       __func__, ret);
-               return -ENODEV;
+               goto error;
        }
 
        DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
-           dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-           (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
-
-       return ret;
-}
-
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
-       struct stmmac_priv *priv = netdev_priv(dev);
-
-       struct mac_device_info *device;
-
-       if (priv->plat->has_gmac) {
-               dev->priv_flags |= IFF_UNICAST_FLT;
-               device = dwmac1000_setup(priv->ioaddr);
-       } else {
-               device = dwmac100_setup(priv->ioaddr);
-       }
-
-       if (!device)
-               return -ENOMEM;
-
-       if (priv->plat->enh_desc) {
-               device->desc = &enh_desc_ops;
-               pr_info("\tEnhanced descriptor structure\n");
-       } else
-               device->desc = &ndesc_ops;
-
-       priv->hw = device;
-       priv->hw->ring = &ring_mode_ops;
-
-       if (device_can_wakeup(priv->device)) {
-               priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
-               enable_irq_wake(priv->wol_irq);
-       }
-
-       return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
-       int ret = 0;
-       struct resource *res;
-       void __iomem *addr = NULL;
-       struct net_device *ndev = NULL;
-       struct stmmac_priv *priv = NULL;
-       struct plat_stmmacenet_data *plat_dat;
-
-       pr_info("STMMAC driver:\n\tplatform registration... ");
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-       pr_info("\tdone!\n");
-
-       if (!request_mem_region(res->start, resource_size(res),
-                               pdev->name)) {
-               pr_err("%s: ERROR: memory allocation failed"
-                      "cannot get the I/O addr 0x%x\n",
-                      __func__, (unsigned int)res->start);
-               return -EBUSY;
-       }
-
-       addr = ioremap(res->start, resource_size(res));
-       if (!addr) {
-               pr_err("%s: ERROR: memory mapping failed\n", __func__);
-               ret = -ENOMEM;
-               goto out_release_region;
-       }
-
-       ndev = alloc_etherdev(sizeof(struct stmmac_priv));
-       if (!ndev) {
-               pr_err("%s: ERROR: allocating the device\n", __func__);
-               ret = -ENOMEM;
-               goto out_unmap;
-       }
-
-       SET_NETDEV_DEV(ndev, &pdev->dev);
-
-       /* Get the MAC information */
-       ndev->irq = platform_get_irq_byname(pdev, "macirq");
-       if (ndev->irq == -ENXIO) {
-               pr_err("%s: ERROR: MAC IRQ configuration "
-                      "information not found\n", __func__);
-               ret = -ENXIO;
-               goto out_free_ndev;
-       }
-
-       priv = netdev_priv(ndev);
-       priv->device = &(pdev->dev);
-       priv->dev = ndev;
-       plat_dat = pdev->dev.platform_data;
-
-       priv->plat = plat_dat;
-
-       priv->ioaddr = addr;
-
-       /* PMT module is not integrated in all the MAC devices. */
-       if (plat_dat->pmt) {
-               pr_info("\tPMT module supported\n");
-               device_set_wakeup_capable(&pdev->dev, 1);
-       }
-       /*
-        * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
-        * The external wake up irq can be passed through the platform code
-        * named as "eth_wake_irq"
-        *
-        * In case the wake up interrupt is not passed from the platform
-        * so the driver will continue to use the mac irq (ndev->irq)
-        */
-       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (priv->wol_irq == -ENXIO)
-               priv->wol_irq = ndev->irq;
-
-
-       platform_set_drvdata(pdev, ndev);
-
-       /* Set the I/O base addr */
-       ndev->base_addr = (unsigned long)addr;
+           ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+           (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
-       /* Custom initialisation */
-       if (priv->plat->init) {
-               ret = priv->plat->init(pdev);
-               if (unlikely(ret))
-                       goto out_free_ndev;
-       }
-
-       /* MAC HW revice detection */
-       ret = stmmac_mac_device_setup(ndev);
-       if (ret < 0)
-               goto out_plat_exit;
-
-       /* Network Device Registration */
-       ret = stmmac_probe(ndev);
-       if (ret < 0)
-               goto out_plat_exit;
-
-       /* Override with kernel parameters if supplied XXX CRS XXX
-        * this needs to have multiple instances */
-       if ((phyaddr >= 0) && (phyaddr <= 31))
-               priv->plat->phy_addr = phyaddr;
-
-       pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
-              "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
-              pdev->id, ndev->irq, addr);
-
-       /* MDIO bus Registration */
-       pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
-       ret = stmmac_mdio_register(ndev);
-       if (ret < 0)
-               goto out_unregister;
-       pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-       ret = stmmac_init_fs(ndev);
-       if (ret < 0)
-               pr_warning("\tFailed debugFS registration");
-#endif
+       return priv;
 
-       return 0;
+error:
+       netif_napi_del(&priv->napi);
 
-out_unregister:
        unregister_netdev(ndev);
-out_plat_exit:
-       if (priv->plat->exit)
-               priv->plat->exit(pdev);
-out_free_ndev:
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
-out_unmap:
-       iounmap(addr);
-out_release_region:
-       release_mem_region(res->start, resource_size(res));
 
-       return ret;
+       return NULL;
 }
 
 /**
  * stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
  */
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
 {
-       struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
-       struct resource *res;
 
        pr_info("%s:\n\tremoving driver", __func__);
 
        priv->hw->dma->stop_rx(priv->ioaddr);
        priv->hw->dma->stop_tx(priv->ioaddr);
 
-       stmmac_disable_mac(priv->ioaddr);
-
+       stmmac_set_mac(priv->ioaddr, false);
        netif_carrier_off(ndev);
-
-       stmmac_mdio_unregister(ndev);
-
-       if (priv->plat->exit)
-               priv->plat->exit(pdev);
-
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
-
-       iounmap((void *)priv->ioaddr);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-       stmmac_exit_fs();
-#endif
-
        free_netdev(ndev);
 
        return 0;
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        int dis_ic = 0;
 
        if (!ndev || !netif_running(ndev))
                return 0;
 
+       if (priv->phydev)
+               phy_stop(priv->phydev);
+
        spin_lock(&priv->lock);
 
        netif_device_detach(ndev);
        netif_stop_queue(ndev);
-       if (priv->phydev)
-               phy_stop(priv->phydev);
 
 #ifdef CONFIG_STMMAC_TIMER
        priv->tm->timer_stop();
@@ -2004,15 +1918,14 @@ static int stmmac_suspend(struct device *dev)
        if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
        else
-               stmmac_disable_mac(priv->ioaddr);
+               stmmac_set_mac(priv->ioaddr, false);
 
        spin_unlock(&priv->lock);
        return 0;
 }
 
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
 
        if (!netif_running(ndev))
@@ -2031,7 +1944,7 @@ static int stmmac_resume(struct device *dev)
        netif_device_attach(ndev);
 
        /* Enable the MAC and DMA */
-       stmmac_enable_mac(priv->ioaddr);
+       stmmac_set_mac(priv->ioaddr, true);
        priv->hw->dma->start_tx(priv->ioaddr);
        priv->hw->dma->start_rx(priv->ioaddr);
 
@@ -2041,77 +1954,33 @@ static int stmmac_resume(struct device *dev)
 #endif
        napi_enable(&priv->napi);
 
-       if (priv->phydev)
-               phy_start(priv->phydev);
-
        netif_start_queue(ndev);
 
        spin_unlock(&priv->lock);
+
+       if (priv->phydev)
+               phy_start(priv->phydev);
+
        return 0;
 }
 
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
-
        if (!ndev || !netif_running(ndev))
                return 0;
 
        return stmmac_release(ndev);
 }
 
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
 {
-       struct net_device *ndev = dev_get_drvdata(dev);
-
        if (!ndev || !netif_running(ndev))
                return 0;
 
        return stmmac_open(ndev);
 }
-
-static const struct dev_pm_ops stmmac_pm_ops = {
-       .suspend = stmmac_suspend,
-       .resume = stmmac_resume,
-       .freeze = stmmac_freeze,
-       .thaw = stmmac_restore,
-       .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
 #endif /* CONFIG_PM */
 
-static struct platform_driver stmmac_driver = {
-       .probe = stmmac_dvr_probe,
-       .remove = stmmac_dvr_remove,
-       .driver = {
-               .name = STMMAC_RESOURCE_NAME,
-               .owner = THIS_MODULE,
-               .pm = &stmmac_pm_ops,
-       },
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&stmmac_driver);
-       return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-       platform_driver_unregister(&stmmac_driver);
-}
-
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
@@ -2171,9 +2040,6 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif
 
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
index 9c3b9d5c3411b4141283a37003d5a2f1839f09b8..51f4412339627c4a09cbd1ac0fae90ddd1d4f40d 100644 (file)
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
  */
 static int stmmac_mdio_reset(struct mii_bus *bus)
 {
+#if defined(CONFIG_STMMAC_PLATFORM)
        struct net_device *ndev = bus->priv;
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
         * on MDC, so perform a dummy mdio read.
         */
        writel(0, priv->ioaddr + mii_address);
-
+#endif
        return 0;
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644 (file)
index 0000000..54a819a
--- /dev/null
@@ -0,0 +1,221 @@
+/*******************************************************************************
+  This contains the functions to handle the pci driver.
+
+  Copyright (C) 2011-2012  Vayavya Labs Pvt Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+       memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+       plat_dat.bus_id = 1;
+       plat_dat.phy_addr = 0;
+       plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+       plat_dat.pbl = 32;
+       plat_dat.clk_csr = 2;   /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+       plat_dat.has_gmac = 1;
+       plat_dat.force_sf_dma_mode = 1;
+
+       mdio_data.bus_id = 1;
+       mdio_data.phy_reset = NULL;
+       mdio_data.phy_mask = 0;
+       plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by other driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+                                     const struct pci_device_id *id)
+{
+       int ret = 0;
+       void __iomem *addr = NULL;
+       struct stmmac_priv *priv = NULL;
+       int i;
+
+       /* Enable pci device */
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+                      pci_name(pdev));
+               return ret;
+       }
+       if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+               pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+               ret = -ENODEV;
+               goto err_out_req_reg_failed;
+       }
+
+       /* Get the base address of device */
+       for (i = 0; i <= 5; i++) {
+               if (pci_resource_len(pdev, i) == 0)
+                       continue;
+               addr = pci_iomap(pdev, i, 0);
+               if (addr == NULL) {
+                       pr_err("%s: ERROR: cannot map regiser memory, aborting",
+                              __func__);
+                       ret = -EIO;
+                       goto err_out_map_failed;
+               }
+               break;
+       }
+       pci_set_master(pdev);
+
+       stmmac_default_data();
+
+       priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+       if (!priv) {
+               pr_err("%s: main drivr probe failed", __func__);
+               goto err_out;
+       }
+       priv->ioaddr = addr;
+       priv->dev->base_addr = (unsigned long)addr;
+       priv->dev->irq = pdev->irq;
+       priv->wol_irq = pdev->irq;
+
+       pci_set_drvdata(pdev, priv->dev);
+
+       pr_debug("STMMAC platform driver registration completed");
+
+       return 0;
+
+err_out:
+       pci_clear_master(pdev);
+err_out_map_failed:
+       pci_release_regions(pdev);
+err_out_req_reg_failed:
+       pci_disable_device(pdev);
+
+       return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main to free the net resources
+ * and releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       stmmac_dvr_remove(ndev);
+
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, priv->ioaddr);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       int ret;
+
+       ret = stmmac_suspend(ndev);
+       pci_save_state(pdev);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+       {
+       PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
+       }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+       .name = STMMAC_RESOURCE_NAME,
+       .id_table = stmmac_id_table,
+       .probe = stmmac_pci_probe,
+       .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+       .suspend = stmmac_pci_suspend,
+       .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+       int ret;
+
+       ret = pci_register_driver(&stmmac_driver);
+       if (ret < 0)
+               pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+       return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+       pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644 (file)
index 0000000..7b1594f
--- /dev/null
@@ -0,0 +1,198 @@
+/*******************************************************************************
+  This contains the functions to handle the platform driver.
+
+  Copyright (C) 2007-2011  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main to init
+ * the net device, register the mdio bus etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res;
+       void __iomem *addr = NULL;
+       struct stmmac_priv *priv = NULL;
+       struct plat_stmmacenet_data *plat_dat;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+               pr_err("%s: ERROR: memory allocation failed"
+                      "cannot get the I/O addr 0x%x\n",
+                      __func__, (unsigned int)res->start);
+               return -EBUSY;
+       }
+
+       addr = ioremap(res->start, resource_size(res));
+       if (!addr) {
+               pr_err("%s: ERROR: memory mapping failed", __func__);
+               ret = -ENOMEM;
+               goto out_release_region;
+       }
+       plat_dat = pdev->dev.platform_data;
+       priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+       if (!priv) {
+               pr_err("%s: main drivr probe failed", __func__);
+               goto out_release_region;
+       }
+
+       priv->ioaddr = addr;
+       /* Set the I/O base addr */
+       priv->dev->base_addr = (unsigned long)addr;
+
+       /* Get the MAC information */
+       priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+       if (priv->dev->irq == -ENXIO) {
+               pr_err("%s: ERROR: MAC IRQ configuration "
+                      "information not found\n", __func__);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       /*
+        * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
+        * The external wake up irq can be passed through the platform code
+        * named as "eth_wake_irq"
+        *
+        * If the wake up interrupt is not passed from the platform code,
+        * the driver will continue to use the mac irq (ndev->irq)
+        */
+       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (priv->wol_irq == -ENXIO)
+               priv->wol_irq = priv->dev->irq;
+
+       platform_set_drvdata(pdev, priv->dev);
+
+       /* Custom initialisation */
+       if (priv->plat->init) {
+               ret = priv->plat->init(pdev);
+               if (unlikely(ret))
+                       goto out_unmap;
+       }
+
+       pr_debug("STMMAC platform driver registration completed");
+
+       return 0;
+
+out_unmap:
+       iounmap(addr);
+       platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+       release_mem_region(res->start, resource_size(res));
+
+       return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and calls the platform's exit hook and releases the resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct resource *res;
+       int ret = stmmac_dvr_remove(ndev);
+
+       if (priv->plat->exit)
+               priv->plat->exit(pdev);
+
+       platform_set_drvdata(pdev, NULL);
+
+       iounmap((void *)priv->ioaddr);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_resume(ndev);
+}
+
+int stmmac_pltfr_freeze(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_freeze(ndev);
+}
+
+int stmmac_pltfr_restore(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+       .suspend = stmmac_pltfr_suspend,
+       .resume = stmmac_pltfr_resume,
+       .freeze = stmmac_pltfr_freeze,
+       .thaw = stmmac_pltfr_restore,
+       .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+       .probe = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+                  .name = STMMAC_RESOURCE_NAME,
+                  .owner = THIS_MODULE,
+                  .pm = &stmmac_pltfr_pm_ops,
+                  },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
index fd40988c19a66af818bb006bee438fd5426f861a..f10665f594c4f8aaaf37b1e81632e80513f28447 100644 (file)
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct cas *cp = netdev_priv(dev);
-       strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
-       info->fw_version[0] = '\0';
-       strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
        info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
                cp->casreg_len : CAS_MAX_REGS;
        info->n_stats = CAS_NUM_STAT_KEYS;
index 73c708107a379543b8695dd470a771f47b30feac..cf433931304fd5cf2544bdfc5d3f58609d4bd81c 100644 (file)
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
                supported |= SUPPORTED_1000baseT_Full;
        lp->supported = supported;
 
-       advertising = 0;
-       if (advert & ADVERTISE_10HALF)
-               advertising |= ADVERTISED_10baseT_Half;
-       if (advert & ADVERTISE_10FULL)
-               advertising |= ADVERTISED_10baseT_Full;
-       if (advert & ADVERTISE_100HALF)
-               advertising |= ADVERTISED_100baseT_Half;
-       if (advert & ADVERTISE_100FULL)
-               advertising |= ADVERTISED_100baseT_Full;
-       if (ctrl1000 & ADVERTISE_1000HALF)
-               advertising |= ADVERTISED_1000baseT_Half;
-       if (ctrl1000 & ADVERTISE_1000FULL)
-               advertising |= ADVERTISED_1000baseT_Full;
+       advertising = mii_adv_to_ethtool_adv_t(advert);
+       advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
        if (bmcr & BMCR_ANENABLE) {
                int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
        struct netdev_queue *txq;
+       unsigned int tx_bytes;
        u16 pkt_cnt, tmp;
        int cons, index;
        u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
        netif_printk(np, tx_done, KERN_DEBUG, np->dev,
                     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-       while (pkt_cnt--)
+       tx_bytes = 0;
+       tmp = pkt_cnt;
+       while (tmp--) {
+               tx_bytes += rp->tx_buffs[cons].skb->len;
                cons = release_tx_packet(np, rp, cons);
+       }
 
        rp->cons = cons;
        smp_mb();
 
+       netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
 out:
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
                        struct tx_ring_info *rp = &np->tx_rings[i];
 
                        niu_free_tx_ring_info(np, rp);
+                       netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
                }
                kfree(np->tx_rings);
                np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                prod = NEXT_TX(rp, prod);
        }
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        if (prod < rp->prod)
                rp->wrap_bit ^= TX_RING_KICK_WRAP;
        rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
        struct niu *np = netdev_priv(dev);
        struct niu_vpd *vpd = &np->vpd;
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       sprintf(info->fw_version, "%d.%d",
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
                vpd->fcode_major, vpd->fcode_minor);
        if (np->parent->plat_type != PLAT_TYPE_NIU)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
 }
 
 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
        if (dev_id_1 < 0 || dev_id_2 < 0)
                return 0;
        if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+               /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+                * test covers the 8706 as well.
+                */
                if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
                        return 0;
        } else {
                if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
index 0d8cfd9ea0538a0fd4379f4d54d92ed297bb2d22..220f724c3377dcfb8a152174100f80e1395674d7 100644 (file)
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
        .remove         = __devexit_p(bigmac_sbus_remove),
 };
 
-static int __init bigmac_init(void)
-{
-       return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
-       platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
index ceab215bb4a31f3f50b0c20465e2593f52385b4a..31441a870b0b84bba089b757a12d73d9657e451e 100644 (file)
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct gem *gp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(gp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
 }
 
 static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index c517dac02ae1837343ff48465274f7b3e54caae3..09c518655db2bcc36ffca85518325a6af3358ecf 100644 (file)
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct happy_meal *hp = netdev_priv(dev);
 
-       strcpy(info->driver, "sunhme");
-       strcpy(info->version, "2.02");
+       strlcpy(info->driver, "sunhme", sizeof(info->driver));
+       strlcpy(info->version, "2.02", sizeof(info->version));
        if (hp->happy_flags & HFLAG_PCI) {
                struct pci_dev *pdev = hp->happy_dev;
-               strcpy(info->bus_info, pci_name(pdev));
+               strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
        }
 #ifdef CONFIG_SBUS
        else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
                struct platform_device *op = hp->happy_dev;
                regs = of_get_property(op->dev.of_node, "regs", NULL);
                if (regs)
-                       sprintf(info->bus_info, "SBUS:%d",
+                       snprintf(info->bus_info, sizeof(info->bus_info),
+                               "SBUS:%d",
                                regs->which_io);
        }
 #endif
@@ -2637,7 +2638,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
        sbus_dp = op->dev.parent->of_node;
 
        /* We can match PCI devices too, do not accept those here. */
-       if (strcmp(sbus_dp->name, "sbus"))
+       if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
                return err;
 
        if (is_qfe) {
@@ -2849,7 +2850,7 @@ err_out:
 static int is_quattro_p(struct pci_dev *pdev)
 {
        struct pci_dev *busdev = pdev->bus->self;
-       struct list_head *tmp;
+       struct pci_dev *this_pdev;
        int n_hmes;
 
        if (busdev == NULL ||
@@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
                return 0;
 
        n_hmes = 0;
-       tmp = pdev->bus->devices.next;
-       while (tmp != &pdev->bus->devices) {
-               struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+       list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
                if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
                    this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
                        n_hmes++;
-
-               tmp = tmp->next;
        }
 
        if (n_hmes != 4)
index 3a90af6d111ce5266db549d3610bb685446047a4..4b19e9b0606b842e0bed7f40bdc5fc0400901b5e 100644 (file)
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
  * @ndev network device
  * @vid  VLAN vid to add
  */
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 1);
+       return 0;
 }
 
 /*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
  * @ndev network device
  * @vid  VLAN vid to kill
  */
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 0);
+       return 0;
 }
 
 /**
index dca9d3369cdd9de8eb5f1156778369d76006c246..c97d2f59085504274eddcc2d5a25ecf352c824f0 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
index 815c7970261bc2ee09e8b8107afe6463aeffe69c..794ac30a577b61b1baaf8a1499c1e0f9ee41bcaf 100644 (file)
@@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DEF_TX_CH                 (0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH                 (0) /* Default 0th channel */
 #define EMAC_DEF_RX_NUM_DESC           (128)
+#define EMAC_DEF_TX_NUM_DESC           (128)
 #define EMAC_DEF_MAX_TX_CH             (1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH             (1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT               (64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@ struct emac_priv {
        u32 mac_hash2;
        u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
        u32 rx_addr_type;
+       atomic_t cur_tx;
        const char *phy_id;
        struct phy_device *phydev;
        spinlock_t lock;
@@ -1044,6 +1046,9 @@ static void emac_tx_handler(void *token, int len, int status)
 {
        struct sk_buff          *skb = token;
        struct net_device       *ndev = skb->dev;
+       struct emac_priv        *priv = netdev_priv(ndev);
+
+       atomic_dec(&priv->cur_tx);
 
        if (unlikely(netif_queue_stopped(ndev)))
                netif_start_queue(ndev);
@@ -1092,6 +1097,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
                goto fail_tx;
        }
 
+       if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+               netif_stop_queue(ndev);
+
        return NETDEV_TX_OK;
 
 fail_tx:
index 10826d8a2a2df36b3731051b2d4abb504c15ce39..6b75063988ecdb4260b625dcacd465c7fe08bcf4 100644 (file)
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                goto done;
 
        /* Re-enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 
        /* HACK: Avoid the "rotting packet" problem (see above). */
        if (qup->__packet_receive_read !=
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
                          sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
                panic("Failed to stop LIPP/LEPP!\n");
 
-       priv->partly_opened = 0;
+       priv->partly_opened = false;
 }
 
 
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
        info->napi_enabled = true;
 
        /* Enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 }
 
 
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
                       priv->network_cpus_count, priv->network_cpus_credits);
 #endif
 
-               priv->partly_opened = 1;
+               priv->partly_opened = true;
 
        } else {
                /* FIXME: Is this possible? */
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
        for (i = 0; i < sh->nr_frags; i++) {
 
                skb_frag_t *f = &sh->frags[i];
-               unsigned long pfn = page_to_pfn(f->page);
+               unsigned long pfn = page_to_pfn(skb_frag_page(f));
 
                /* FIXME: Compute "hash_for_home" properly. */
                /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                /* FIXME: Hmmm. */
                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
-                       BUG_ON(PageHighMem(f->page));
+                       BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, f->size, 0);
                }
 
index a8df7eca0956d2836f9c4562905b505aa10f7460..a9ce01bafd200c34265f33e4cff8d202719a5312 100644 (file)
@@ -1688,18 +1688,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
        mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
 }
 
-static int tsi108_ether_init(void)
-{
-       int ret;
-       ret = platform_driver_register (&tsi_eth_driver);
-       if (ret < 0){
-               printk("tsi108_ether_init: error initializing ethernet "
-                      "device\n");
-               return ret;
-       }
-       return 0;
-}
-
 static int tsi108_ether_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1702,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
 
        return 0;
 }
-static void tsi108_ether_exit(void)
-{
-       platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
 
 MODULE_AUTHOR("Tundra Semiconductor Corporation");
 MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
index f34dd99fe57917c546414c47607180bee2e59467..5c4983b2870a9c7a5dd7adefba08a8c054671a9b 100644 (file)
@@ -35,6 +35,7 @@
 #define DRV_VERSION    "1.5.0"
 #define DRV_RELDATE    "2010-10-09"
 
+#include <linux/types.h>
 
 /* A few user-configurable values.
    These may be modified when a driver module is loaded. */
@@ -55,7 +56,7 @@ static int rx_copybreak;
 
 /* Work-around for broken BIOSes: they are unable to get the chip back out of
    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
 
 /*
  * In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -488,8 +489,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1262,7 @@ static void rhine_update_vcam(struct net_device *dev)
        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
 }
 
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1269,9 +1270,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1279,6 +1281,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
 static void init_registers(struct net_device *dev)
@@ -2009,9 +2012,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
 {
        struct rhine_private *rp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(rp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2320,7 +2323,7 @@ static int __init rhine_init(void)
 #endif
        if (dmi_check_system(rhine_dmi_table)) {
                /* these BIOSes fail at PXE boot if chip is in D3 */
-               avoid_D3 = 1;
+               avoid_D3 = true;
                pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
        }
        else if (avoid_D3)
index 4535d7cc848edb5a4d775c562e75eaae9b2032d2..4128d6b8cc28d02bdd2c59a217d1376e54786451 100644 (file)
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 }
 
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev,
 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct velocity_info *vptr = netdev_priv(dev);
-       strcpy(info->driver, VELOCITY_NAME);
-       strcpy(info->version, VELOCITY_VERSION);
-       strcpy(info->bus_info, pci_name(vptr->pdev));
+       strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+       strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
 }
 
 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
index caf3659e173cda3a04735cb1ecad7cfd94de27c7..f21addb1db95e89768eb431d4c634bb3e195b1d8 100644 (file)
@@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
                return;
        temac_iow(lp, XTE_LSW0_OFFSET, value);
        temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
+       temac_indirect_busywait(lp);
 }
 
 /**
@@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
        struct temac_local *lp = netdev_priv(ndev);
        int i;
 
+       /* Reset Local Link (DMA) */
+       lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+
        for (i = 0; i < RX_BD_NUM; i++) {
                if (!lp->rx_skb[i])
                        break;
@@ -233,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
        struct sk_buff *skb;
        int i;
 
-       lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+       lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
        if (!lp->rx_skb) {
                dev_err(&ndev->dev,
                                "can't allocate memory for DMA RX buffer\n");
@@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev)
                phy_start(lp->phy_dev);
        }
 
+       temac_device_reset(ndev);
+
        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
@@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev)
        if (rc)
                goto err_rx_irq;
 
-       temac_device_reset(ndev);
        return 0;
 
  err_rx_irq:
@@ -915,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
 }
 #endif
 
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+       struct temac_local *lp = netdev_priv(ndev);
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!lp->phy_dev)
+               return -EINVAL;
+
+       return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
 static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_mac_address = netdev_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
+       .ndo_do_ioctl = temac_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
 #endif
@@ -1072,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
 
        of_node_put(np); /* Finished with the DMA node; drop the reference */
 
-       if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+       if (!lp->rx_irq || !lp->tx_irq) {
                dev_err(&op->dev, "could not determine irqs\n");
                rc = -ENOMEM;
                goto err_iounmap_2;
@@ -1162,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
        },
 };
 
-static int __init temac_init(void)
-{
-       return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
-       platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
 
 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
 MODULE_AUTHOR("Yoshio Kashiwagi");
index 8018d7d045b044bbd0bb5dfa403e53d47d1bf390..79013e5731a5d94e424ec4ba499f8169646d345f 100644 (file)
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
  */
 static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
-       bool tx_complete = 0;
+       bool tx_complete = false;
        struct net_device *dev = dev_id;
        struct net_local *lp = netdev_priv(dev);
        void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
                out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
 
-               tx_complete = 1;
+               tx_complete = true;
        }
 
        /* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
                out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
                         tx_status);
 
-               tx_complete = 1;
+               tx_complete = true;
        }
 
        /* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
 
        /* Get IRQ for the device */
        rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-       if (rc == NO_IRQ) {
+       if (!rc) {
                dev_err(dev, "no IRQ found\n");
                return rc;
        }
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
        .remove         = __devexit_p(xemaclite_of_remove),
 };
 
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return:     0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
-       /* No kernel boot options used, we just need to register the driver */
-       return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
-       platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
 
 MODULE_AUTHOR("Xilinx, Inc.");
 MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
index bbe8b7dbf3f381aadba59d6900c9aca7002adc6d..33979c3ac943ebff6c106cb1fb787aca07ea57d4 100644 (file)
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, "xirc2ps_cs");
+       strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
        sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
 }
 
index 7393eb732ee6737b31fa28c70f5baf1c4e1f3499..95eb34fdbba7b2ac1996303675bc9b0b262cbadf 100644 (file)
@@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS
          kernel code or by user space programs. Say Y here only if you have
          the memory.
 
-endif /* HIPPI */
+endif # HIPPI
index 46b5f5fd686bab98b98aff86fbf3e43b0702ddb8..e05b645bbc323559b39e6d42c69744159774767d 100644 (file)
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
 };
 
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6      | \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
 
index 9d4ce1aba10c1fdbc2ae6a8b1a20b40ee2134fb9..a561ae44a9ac1c982b1d5390a6f288abe0094523 100644 (file)
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
        },
 };
 
-static int __init bfin_sir_init(void)
-{
-       return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
-       platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
 
 module_param(max_rate, int, 0);
 MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
index b45b2cc42804e647fded02501c2a4424927f941a..64f403da101cab6d4446328fd4dc972e506ffe20 100644 (file)
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
 
 static int max_baud = 4000000;
 #ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
 #endif
 
 
index d0851dfa03780cd71a97633a5770ec2241baeb59..81d5275a15e298dc11a4208b00ffbbbf4cd1864b 100644 (file)
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
        .resume         = pxa_irda_resume,
 };
 
-static int __init pxa_irda_init(void)
-{
-       return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
-       platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pxa2xx-ir");
index d275e276e742a2234dc4d42c5df8647f09b49fa5..725d6b36782280b610dd5780e41d91b2561d3bd8 100644 (file)
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
        },
 };
 
-static int __init sh_irda_init(void)
-{
-       return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
-       platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
index ed7d7d62bf68ddff78d657fa3d93d264434264a0..e6661b5c1f83643e9bcfd1bcbf6094be5bc00897 100644 (file)
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
        },
 };
 
-static int __init sh_sir_init(void)
-{
-       return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
-       platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
index 8b1c3484d271ac6a7aa6640d268e9e7013b52535..6c95d4087b2d54db2a4fbf309fc448244f33d731 100644 (file)
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
 MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
 MODULE_LICENSE("GPL");
 
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
 module_param_named(nopnp, smsc_nopnp, bool, 0);
 MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
 
index 4ce9e5f2c069876a0fd4ec6d1446c25408411a33..b71998d0b5b495132b4e07e80f7fae6a93f634ef 100644 (file)
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
                | NETIF_F_ALL_TSO
                | NETIF_F_UFO
-               | NETIF_F_NO_CSUM
+               | NETIF_F_HW_CSUM
                | NETIF_F_RXCSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
index 74134970b7095c4faae3f9f627358bd69617353d..f2f820c4b40a4fd8c3384905fb30b35dbe7c61fc 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
 #include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
        return stats;
 }
 
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
                                    unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_add_vid)
-               ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+       return vlan_vid_add(lowerdev, vid);
 }
 
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
                                     unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_kill_vid)
-               ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+       vlan_vid_del(lowerdev, vid);
+       return 0;
 }
 
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
index 1b7082d08f334b8d1d0ab7f350bb2de5a3d41c5c..58dc117a8d781b93ba91a258fe87de1e3bf5a7c8 100644 (file)
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
        if (vlan) {
                int index = get_slot(vlan, q);
 
-               rcu_assign_pointer(vlan->taps[index], NULL);
-               rcu_assign_pointer(q->vlan, NULL);
+               RCU_INIT_POINTER(vlan->taps[index], NULL);
+               RCU_INIT_POINTER(q->vlan, NULL);
                sock_put(&q->sk);
                --vlan->numvtaps;
        }
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
        if (!numvtaps)
                goto out;
 
+       /* Check if we can use flow to select a queue */
+       rxq = skb_get_rxhash(skb);
+       if (rxq) {
+               tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+               if (tap)
+                       goto out;
+       }
+
        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);
 
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                        goto out;
        }
 
-       /* Check if we can use flow to select a queue */
-       rxq = skb_get_rxhash(skb);
-       if (rxq) {
-               tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
-               if (tap)
-                       goto out;
-       }
-
        /* Everything failed - find first available queue */
        for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
                tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
                                              lockdep_is_held(&macvtap_lock));
                if (q) {
                        qlist[j++] = q;
-                       rcu_assign_pointer(vlan->taps[i], NULL);
-                       rcu_assign_pointer(q->vlan, NULL);
+                       RCU_INIT_POINTER(vlan->taps[i], NULL);
+                       RCU_INIT_POINTER(q->vlan, NULL);
                        vlan->numvtaps--;
                }
        }
index c62e7816d54864d317b1c646c00c18bc78b97ead..c70c2332d15e94ea2b9620924dede4474f00ffef 100644 (file)
 
 static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
 {
-       u32 result = 0;
        int advert;
 
        advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
-       if (advert & LPA_LPACK)
-               result |= ADVERTISED_Autoneg;
-       if (advert & ADVERTISE_10HALF)
-               result |= ADVERTISED_10baseT_Half;
-       if (advert & ADVERTISE_10FULL)
-               result |= ADVERTISED_10baseT_Full;
-       if (advert & ADVERTISE_100HALF)
-               result |= ADVERTISED_100baseT_Half;
-       if (advert & ADVERTISE_100FULL)
-               result |= ADVERTISED_100baseT_Full;
-       if (advert & ADVERTISE_PAUSE_CAP)
-               result |= ADVERTISED_Pause;
-       if (advert & ADVERTISE_PAUSE_ASYM)
-               result |= ADVERTISED_Asym_Pause;
-
-       return result;
+
+       return mii_lpa_to_ethtool_lpa_t(advert);
 }
 
 /**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                ecmd->autoneg = AUTONEG_ENABLE;
 
                ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
-               if (ctrl1000 & ADVERTISE_1000HALF)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Half;
-               if (ctrl1000 & ADVERTISE_1000FULL)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (mii->supports_gmii)
+                       ecmd->advertising |=
+                                       mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
                if (bmsr & BMSR_ANEGCOMPLETE) {
                        ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
-                       if (stat1000 & LPA_1000HALF)
-                               ecmd->lp_advertising |=
-                                       ADVERTISED_1000baseT_Half;
-                       if (stat1000 & LPA_1000FULL)
-                               ecmd->lp_advertising |=
-                                       ADVERTISED_1000baseT_Full;
+                       ecmd->lp_advertising |=
+                                       mii_stat1000_to_ethtool_lpa_t(stat1000);
                } else {
                        ecmd->lp_advertising = 0;
                }
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                        advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
                        tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
                }
-               if (ecmd->advertising & ADVERTISED_10baseT_Half)
-                       tmp |= ADVERTISE_10HALF;
-               if (ecmd->advertising & ADVERTISED_10baseT_Full)
-                       tmp |= ADVERTISE_10FULL;
-               if (ecmd->advertising & ADVERTISED_100baseT_Half)
-                       tmp |= ADVERTISE_100HALF;
-               if (ecmd->advertising & ADVERTISED_100baseT_Full)
-                       tmp |= ADVERTISE_100FULL;
-               if (mii->supports_gmii) {
-                       if (ecmd->advertising & ADVERTISED_1000baseT_Half)
-                               tmp2 |= ADVERTISE_1000HALF;
-                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-                               tmp2 |= ADVERTISE_1000FULL;
-               }
+               tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+               if (mii->supports_gmii)
+                       tmp2 |=
+                             ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
                if (advert != tmp) {
                        mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
                        mii->advertising = tmp;
index bb88e12101c78b86b91def144677e4ecb10d8091..fbdcdf83cbfd81b142fb228460b2effddd266cad 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig PHYLIB
-       bool "PHY Device support and infrastructure"
+       tristate "PHY Device support and infrastructure"
        depends on !S390
        depends on NETDEVICES
        help
@@ -131,3 +131,7 @@ config MDIO_OCTEON
          If in doubt, say Y.
 
 endif # PHYLIB
+
+config MICREL_KS8995MA
+       tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+       depends on SPI
index 2333215bbb322e0ba7bce5705c34cc810f07eb7a..e15c83fecbe04d7b34f38ac0430bae294bde27b6 100644 (file)
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY)     += dp83640.o
 obj-$(CONFIG_STE10XP)          += ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)       += micrel.o
 obj-$(CONFIG_MDIO_OCTEON)      += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA)  += spi_ks8995.o
index 65391891d8c41a180592d500825df75f67b44e4c..daec9b05d168ca4f0f103f3638fcc3259e9ea304 100644 (file)
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
        return 0;
 }
 
+static int mdiobb_reset(struct mii_bus *bus)
+{
+       struct mdiobb_ctrl *ctrl = bus->priv;
+       if (ctrl->reset)
+               ctrl->reset(bus);
+       return 0;
+}
+
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
        struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 
        bus->read = mdiobb_read;
        bus->write = mdiobb_write;
+       bus->reset = mdiobb_reset;
        bus->priv = ctrl;
 
        return bus;
index 2843c90f712f40c27c0688cb336dc87dbede054e..89c5a3eccc12daa760fa0e1e6c73e392c0a81f3e 100644 (file)
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
                goto out;
 
        bitbang->ctrl.ops = &mdio_gpio_ops;
+       bitbang->ctrl.reset = pdata->reset;
        bitbang->mdc = pdata->mdc;
        bitbang->mdio = pdata->mdio;
 
index 83a5a5afec67f942f4a627cc0cd1b0f752325966..f320f466f03b2fe9a2c30ef139d18ab5f6ae675d 100644 (file)
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
        if (adv < 0)
                return adv;
 
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | 
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
                 ADVERTISE_PAUSE_ASYM);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       if (advertise & ADVERTISED_Pause)
-               adv |= ADVERTISE_PAUSE_CAP;
-       if (advertise & ADVERTISED_Asym_Pause)
-               adv |= ADVERTISE_PAUSE_ASYM;
+       adv |= ethtool_adv_to_mii_adv_t(advertise);
 
        if (adv != oldadv) {
                err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
                        return adv;
 
                adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-               if (advertise & SUPPORTED_1000baseT_Half)
-                       adv |= ADVERTISE_1000HALF;
-               if (advertise & SUPPORTED_1000baseT_Full)
-                       adv |= ADVERTISE_1000FULL;
+               adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
 
                if (adv != oldadv) {
                        err = phy_write(phydev, MII_CTRL1000, adv);
index 342505c976d652095a2eac183e6df365a4de2475..fc3e7e96c88c64a3c9979a96596a6f0e016778ab 100644 (file)
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
-
-#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
-#define MII_LAN83C185_IM  30 /* Interrupt Mask */
-#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
-
-#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
-#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
-#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
-#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
-#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
-#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
-#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
-
-#define MII_LAN83C185_ISF_INT_ALL (0x0e)
-
-#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
-       (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
-        MII_LAN83C185_ISF_INT7)
-
-#define MII_LAN83C185_EDPWRDOWN        (1 << 13) /* EDPWRDOWN */
+#include <linux/smscphy.h>
 
 static int smsc_phy_config_intr(struct phy_device *phydev)
 {
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644 (file)
index 0000000..116a2dd
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ *     Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION            "0.1.1"
+#define DRV_DESC               "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0         0x00    /* Chip ID0 */
+#define KS8995_REG_ID1         0x01    /* Chip ID1 */
+
+#define KS8995_REG_GC0         0x02    /* Global Control 0 */
+#define KS8995_REG_GC1         0x03    /* Global Control 1 */
+#define KS8995_REG_GC2         0x04    /* Global Control 2 */
+#define KS8995_REG_GC3         0x05    /* Global Control 3 */
+#define KS8995_REG_GC4         0x06    /* Global Control 4 */
+#define KS8995_REG_GC5         0x07    /* Global Control 5 */
+#define KS8995_REG_GC6         0x08    /* Global Control 6 */
+#define KS8995_REG_GC7         0x09    /* Global Control 7 */
+#define KS8995_REG_GC8         0x0a    /* Global Control 8 */
+#define KS8995_REG_GC9         0x0b    /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r)    ((0x10 * p) + r)         /* Port Control */
+#define KS8995_REG_PS(p, r)    ((0x10 * p) + r + 0xe)  /* Port Status */
+
+#define KS8995_REG_TPC0                0x60    /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1                0x61    /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2                0x62    /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3                0x63    /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4                0x64    /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5                0x65    /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6                0x66    /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7                0x67    /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0                0x68    /* MAC address 0 */
+#define KS8995_REG_MAC1                0x69    /* MAC address 1 */
+#define KS8995_REG_MAC2                0x6a    /* MAC address 2 */
+#define KS8995_REG_MAC3                0x6b    /* MAC address 3 */
+#define KS8995_REG_MAC4                0x6c    /* MAC address 4 */
+#define KS8995_REG_MAC5                0x6d    /* MAC address 5 */
+
+#define KS8995_REG_IAC0                0x6e    /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1                0x6f    /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7                0x70    /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6                0x71    /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5                0x72    /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4                0x73    /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3                0x74    /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2                0x75    /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1                0x76    /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0                0x77    /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE       0x80
+
+#define ID1_CHIPID_M           0xf
+#define ID1_CHIPID_S           4
+#define ID1_REVISION_M         0x7
+#define ID1_REVISION_S         1
+#define ID1_START_SW           1       /* start the switch */
+
+#define FAMILY_KS8995          0x95
+#define CHIPID_M               0
+
+#define KS8995_CMD_WRITE       0x02U
+#define KS8995_CMD_READ                0x03U
+
+#define KS8995_RESET_DELAY     10 /* usec */
+
+struct ks8995_pdata {
+       /* not yet implemented */
+};
+
+struct ks8995_switch {
+       struct spi_device       *spi;
+       struct mutex            lock;
+       struct ks8995_pdata     *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+       return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+       return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+                unsigned offset, size_t count)
+{
+       u8 cmd[2];
+       struct spi_transfer t[2];
+       struct spi_message m;
+       int err;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].rx_buf = buf;
+       t[1].len = count;
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = KS8995_CMD_READ;
+       cmd[1] = offset;
+
+       mutex_lock(&ks->lock);
+       err = spi_sync(ks->spi, &m);
+       mutex_unlock(&ks->lock);
+
+       return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+                unsigned offset, size_t count)
+{
+       u8 cmd[2];
+       struct spi_transfer t[2];
+       struct spi_message m;
+       int err;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].tx_buf = buf;
+       t[1].len = count;
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = KS8995_CMD_WRITE;
+       cmd[1] = offset;
+
+       mutex_lock(&ks->lock);
+       err = spi_sync(ks->spi, &m);
+       mutex_unlock(&ks->lock);
+
+       return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+       return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+       char buf = val;
+
+       return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+       return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+       return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+       int err;
+
+       err = ks8995_stop(ks);
+       if (err)
+               return err;
+
+       udelay(KS8995_RESET_DELAY);
+
+       return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+       struct device *dev;
+       struct ks8995_switch *ks8995;
+
+       dev = container_of(kobj, struct device, kobj);
+       ks8995 = dev_get_drvdata(dev);
+
+       if (unlikely(off > KS8995_REGS_SIZE))
+               return 0;
+
+       if ((off + count) > KS8995_REGS_SIZE)
+               count = KS8995_REGS_SIZE - off;
+
+       if (unlikely(!count))
+               return count;
+
+       return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+       struct device *dev;
+       struct ks8995_switch *ks8995;
+
+       dev = container_of(kobj, struct device, kobj);
+       ks8995 = dev_get_drvdata(dev);
+
+       if (unlikely(off >= KS8995_REGS_SIZE))
+               return -EFBIG;
+
+       if ((off + count) > KS8995_REGS_SIZE)
+               count = KS8995_REGS_SIZE - off;
+
+       if (unlikely(!count))
+               return count;
+
+       return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+       .attr = {
+               .name   = "registers",
+               .mode   = S_IRUSR | S_IWUSR,
+       },
+       .size   = KS8995_REGS_SIZE,
+       .read   = ks8995_registers_read,
+       .write  = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+       struct ks8995_switch    *ks;
+       struct ks8995_pdata     *pdata;
+       u8      ids[2];
+       int     err;
+
+       /* Chip description */
+       pdata = spi->dev.platform_data;
+
+       ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+       if (!ks) {
+               dev_err(&spi->dev, "no memory for private data\n");
+               return -ENOMEM;
+       }
+
+       mutex_init(&ks->lock);
+       ks->pdata = pdata;
+       ks->spi = spi_dev_get(spi);
+       dev_set_drvdata(&spi->dev, ks);
+
+       spi->mode = SPI_MODE_0;
+       spi->bits_per_word = 8;
+       err = spi_setup(spi);
+       if (err) {
+               dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+               goto err_drvdata;
+       }
+
+       err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+       if (err < 0) {
+               dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+                               err);
+               goto err_drvdata;
+       }
+
+       switch (ids[0]) {
+       case FAMILY_KS8995:
+               break;
+       default:
+               dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+               err = -ENODEV;
+               goto err_drvdata;
+       }
+
+       err = ks8995_reset(ks);
+       if (err)
+               goto err_drvdata;
+
+       err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+       if (err) {
+               dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+                                   err);
+               goto err_drvdata;
+       }
+
+       dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+                       "Revision:%01x\n", ids[0],
+                       get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+       return 0;
+
+err_drvdata:
+       dev_set_drvdata(&spi->dev, NULL);
+       kfree(ks);
+       return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+       struct ks8995_data      *ks8995;
+
+       ks8995 = dev_get_drvdata(&spi->dev);
+       sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+       dev_set_drvdata(&spi->dev, NULL);
+       kfree(ks8995);
+
+       return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+       .driver = {
+               .name       = "spi-ks8995",
+               .bus         = &spi_bus_type,
+               .owner     = THIS_MODULE,
+       },
+       .probe    = ks8995_probe,
+       .remove   = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+       printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+
+       return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+       spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
index 89f829f5f7257fcacd54902d3963520ab5b47525..c1c9293c2bbf1eecd638d8d6e428777951cdff9d 100644 (file)
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
 {
        spin_lock(&chan_lock);
        clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-       rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+       RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
        spin_unlock(&chan_lock);
        synchronize_rcu();
 }
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po)) {
-               release_sock(sk);
+       if (add_chan(po))
                error = -EBUSY;
-       }
 
        release_sock(sk);
        return error;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644 (file)
index 0000000..248a144
--- /dev/null
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+       tristate "Ethernet team driver support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       ---help---
+         This allows one to create virtual interfaces that team together
+         multiple ethernet devices.
+
+         Team devices can be added using the "ip" command from the
+         iproute2 package:
+
+         "ip link add link [ address MAC ] [ NAME ] type team"
+
+         To compile this driver as a module, choose M here: the module
+         will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+       tristate "Round-robin mode support"
+       depends on NET_TEAM
+       ---help---
+         Basic mode where port used for transmitting packets is selected in
+         round-robin fashion using packet counter.
+
+         All added ports are set up to have the team's mac address.
+
+         To compile this team mode as a module, choose M here: the module
+         will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+       tristate "Active-backup mode support"
+       depends on NET_TEAM
+       ---help---
+         Only one port is active at a time and the rest of ports are used
+         for backup.
+
+         Mac addresses of ports are not modified. Userspace is responsible
+         to do so.
+
+         To compile this team mode as a module, choose M here: the module
+         will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644 (file)
index 0000000..85f2028
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644 (file)
index 0000000..ed2a862
--- /dev/null
@@ -0,0 +1,1684 @@
+/*
+ * drivers/net/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+       struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+       return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+       struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+       return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change mac address for open port device is tested in
+ * team_port_add, this function can be called without control of return value
+ */
+static int __set_port_mac(struct net_device *port_dev,
+                         const unsigned char *dev_addr)
+{
+       struct sockaddr addr;
+
+       memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+       addr.sa_family = ARPHRD_ETHER;
+       return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+       return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+       return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+       struct team_option *option;
+
+       list_for_each_entry(option, &team->option_list, list) {
+               if (strcmp(option->name, opt_name) == 0)
+                       return option;
+       }
+       return NULL;
+}
+
+int team_options_register(struct team *team,
+                         const struct team_option *option,
+                         size_t option_count)
+{
+       int i;
+       struct team_option **dst_opts;
+       int err;
+
+       dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+                          GFP_KERNEL);
+       if (!dst_opts)
+               return -ENOMEM;
+       for (i = 0; i < option_count; i++, option++) {
+               if (__team_find_option(team, option->name)) {
+                       err = -EEXIST;
+                       goto rollback;
+               }
+               dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+               if (!dst_opts[i]) {
+                       err = -ENOMEM;
+                       goto rollback;
+               }
+       }
+
+       for (i = 0; i < option_count; i++)
+               list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+       kfree(dst_opts);
+       return 0;
+
+rollback:
+       for (i = 0; i < option_count; i++)
+               kfree(dst_opts[i]);
+
+       kfree(dst_opts);
+       return err;
+}
+
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+                                       struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+                                     const struct team_option *option,
+                                     size_t option_count)
+{
+       int i;
+
+       for (i = 0; i < option_count; i++, option++) {
+               struct team_option *del_opt;
+
+               del_opt = __team_find_option(team, option->name);
+               if (del_opt) {
+                       list_del(&del_opt->list);
+                       kfree(del_opt);
+               }
+       }
+}
+
+void team_options_unregister(struct team *team,
+                            const struct team_option *option,
+                            size_t option_count)
+{
+       __team_options_unregister(team, option, option_count);
+       __team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+                          void *arg)
+{
+       return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+                          void *arg)
+{
+       int err;
+
+       err = option->setter(team, arg);
+       if (err)
+               return err;
+
+       __team_options_change_check(team, option);
+       return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+       struct team_mode *mode;
+
+       list_for_each_entry(mode, &mode_list, list) {
+               if (strcmp(mode->kind, kind) == 0)
+                       return mode;
+       }
+       return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+       while (*name != '\0') {
+               if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+                       return false;
+               name++;
+       }
+       return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+       int err = 0;
+
+       if (!is_good_mode_name(mode->kind) ||
+           mode->priv_size > TEAM_MODE_PRIV_SIZE)
+               return -EINVAL;
+       spin_lock(&mode_list_lock);
+       if (__find_mode(mode->kind)) {
+               err = -EEXIST;
+               goto unlock;
+       }
+       list_add_tail(&mode->list, &mode_list);
+unlock:
+       spin_unlock(&mode_list_lock);
+       return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+       spin_lock(&mode_list_lock);
+       list_del_init(&mode->list);
+       spin_unlock(&mode_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+       struct team_mode *mode;
+
+       spin_lock(&mode_list_lock);
+       mode = __find_mode(kind);
+       if (!mode) {
+               spin_unlock(&mode_list_lock);
+               request_module("team-mode-%s", kind);
+               spin_lock(&mode_list_lock);
+               mode = __find_mode(kind);
+       }
+       if (mode)
+               if (!try_module_get(mode->owner))
+                       mode = NULL;
+
+       spin_unlock(&mode_list_lock);
+       return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+       module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+rx_handler_result_t team_dummy_receive(struct team *team,
+                                      struct team_port *port,
+                                      struct sk_buff *skb)
+{
+       return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+       /*
+        * To avoid checks in rx/tx skb paths, ensure here that non-null and
+        * correct ops are always set.
+        */
+
+       if (list_empty(&team->port_list) ||
+           !team->mode || !team->mode->ops->transmit)
+               team->ops.transmit = team_dummy_transmit;
+       else
+               team->ops.transmit = team->mode->ops->transmit;
+
+       if (list_empty(&team->port_list) ||
+           !team->mode || !team->mode->ops->receive)
+               team->ops.receive = team_dummy_receive;
+       else
+               team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that it's ensured no port is present
+ * at the time of mode change. Therefore no packets are in fly so there's no
+ * need to set mode operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+                             const struct team_mode *new_mode)
+{
+       /* Check if mode was previously set and do cleanup if so */
+       if (team->mode) {
+               void (*exit_op)(struct team *team) = team->ops.exit;
+
+               /* Clear ops area so no callback is called any longer */
+               memset(&team->ops, 0, sizeof(struct team_mode_ops));
+               team_adjust_ops(team);
+
+               if (exit_op)
+                       exit_op(team);
+               team_mode_put(team->mode);
+               team->mode = NULL;
+               /* zero private data area */
+               memset(&team->mode_priv, 0,
+                      sizeof(struct team) - offsetof(struct team, mode_priv));
+       }
+
+       if (!new_mode)
+               return 0;
+
+       if (new_mode->ops->init) {
+               int err;
+
+               err = new_mode->ops->init(team);
+               if (err)
+                       return err;
+       }
+
+       team->mode = new_mode;
+       memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+       team_adjust_ops(team);
+
+       return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+       struct team_mode *new_mode;
+       struct net_device *dev = team->dev;
+       int err;
+
+       if (!list_empty(&team->port_list)) {
+               netdev_err(dev, "No ports can be present during mode change\n");
+               return -EBUSY;
+       }
+
+       if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+               netdev_err(dev, "Unable to change to the same mode the team is in\n");
+               return -EINVAL;
+       }
+
+       new_mode = team_mode_get(kind);
+       if (!new_mode) {
+               netdev_err(dev, "Mode \"%s\" not found\n", kind);
+               return -EINVAL;
+       }
+
+       err = __team_change_mode(team, new_mode);
+       if (err) {
+               netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+               team_mode_put(new_mode);
+               return err;
+       }
+
+       netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+       return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct team_port *port;
+       struct team *team;
+       rx_handler_result_t res;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return RX_HANDLER_CONSUMED;
+
+       *pskb = skb;
+
+       port = team_port_get_rcu(skb->dev);
+       team = port->team;
+
+       res = team->ops.receive(team, port, skb);
+       if (res == RX_HANDLER_ANOTHER) {
+               struct team_pcpu_stats *pcpu_stats;
+
+               pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->rx_packets++;
+               pcpu_stats->rx_bytes += skb->len;
+               if (skb->pkt_type == PACKET_MULTICAST)
+                       pcpu_stats->rx_multicast++;
+               u64_stats_update_end(&pcpu_stats->syncp);
+
+               skb->dev = team->dev;
+       } else {
+               this_cpu_inc(team->pcpu_stats->rx_dropped);
+       }
+
+       return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+                          const struct team_port *port)
+{
+       struct team_port *cur;
+
+       list_for_each_entry(cur, &team->port_list, list)
+               if (cur == port)
+                       return true;
+       return false;
+}
+
+/*
+ * Add/delete port to the team port list. Write guarded by rtnl_lock.
+ * Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+                                   struct team_port *port)
+{
+       port->index = team->port_count++;
+       hlist_add_head_rcu(&port->hlist,
+                          team_port_index_hash(team, port->index));
+       list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+       int i;
+       struct team_port *port;
+
+       for (i = rm_index + 1; i < team->port_count; i++) {
+               port = team_get_port_by_index(team, i);
+               hlist_del_rcu(&port->hlist);
+               port->index--;
+               hlist_add_head_rcu(&port->hlist,
+                                  team_port_index_hash(team, port->index));
+       }
+}
+
+static void team_port_list_del_port(struct team *team,
+                                  struct team_port *port)
+{
+       int rm_index = port->index;
+
+       hlist_del_rcu(&port->hlist);
+       list_del_rcu(&port->list);
+       __reconstruct_port_hlist(team, rm_index);
+       team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                           NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                           NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+       struct team_port *port;
+       u32 vlan_features = TEAM_VLAN_FEATURES;
+       unsigned short max_hard_header_len = ETH_HLEN;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               vlan_features = netdev_increment_features(vlan_features,
+                                       port->dev->vlan_features,
+                                       TEAM_VLAN_FEATURES);
+
+               if (port->dev->hard_header_len > max_hard_header_len)
+                       max_hard_header_len = port->dev->hard_header_len;
+       }
+
+       team->dev->vlan_features = vlan_features;
+       team->dev->hard_header_len = max_hard_header_len;
+
+       netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+       mutex_lock(&team->lock);
+       __team_compute_features(team);
+       mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+       int err = 0;
+
+       dev_hold(team->dev);
+       port->dev->priv_flags |= IFF_TEAM_PORT;
+       if (team->ops.port_enter) {
+               err = team->ops.port_enter(team, port);
+               if (err) {
+                       netdev_err(team->dev, "Device %s failed to enter team mode\n",
+                                  port->dev->name);
+                       goto err_port_enter;
+               }
+       }
+
+       return 0;
+
+err_port_enter:
+       port->dev->priv_flags &= ~IFF_TEAM_PORT;
+       dev_put(team->dev);
+
+       return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+       if (team->ops.port_leave)
+               team->ops.port_leave(team, port);
+       port->dev->priv_flags &= ~IFF_TEAM_PORT;
+       dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
/*
 * team_port_add - enslave @port_dev to @team
 *
 * Called with team->lock held (see team_add_slave()).  Validates the
 * candidate device, saves its original MTU and MAC so they can be
 * restored on removal, then wires it up: mode enter hook, dev_open(),
 * vlan id propagation, master link, rx_handler registration.  On any
 * failure the already-performed steps are unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	/* Only non-loopback Ethernet devices may act as ports. */
	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	/* A device can be a port of at most one team at a time. */
	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	/* The port must be down so rx_handler/master can be set safely. */
	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	/* Remember the original MTU; restored on port removal. */
	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	/* Remember the original MAC address as well. */
	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	/* Propagate vlan ids already configured on the team device. */
	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
				portname);
		goto err_vids_add;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	team_port_list_add_port(team, port);
	team_adjust_ops(team);
	__team_compute_features(team);
	/* Report the port's current carrier state right away. */
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

	/* Error unwind: reverse order of the setup steps above. */
err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
+
/*
 * team_port_del - release @port_dev from @team
 *
 * Called with team->lock held.  Undoes everything team_port_add() did,
 * restoring the device's original MAC and MTU.  synchronize_rcu() makes
 * sure no RCU reader still sees the port before it is freed.
 *
 * Returns 0 on success, -ENOENT if @port_dev is not a port of this team.
 */
static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	/* Report link down before the port disappears. */
	__team_port_change_check(port, false);
	team_port_list_del_port(team, port);
	team_adjust_ops(team);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	/* Wait for in-flight RCU readers before freeing the port. */
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+       const char **str = arg;
+
+       *str = team->mode ? team->mode->kind : team_no_mode_kind;
+       return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+       const char **str = arg;
+
+       return team_change_mode(team, *str);
+}
+
/* Built-in options every team instance exposes over netlink. */
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
};
+
/*
 * team_init - ndo_init callback
 *
 * Allocates per-cpu statistics, initializes the port hash/list and
 * registers the built-in options.  Starts with carrier off; carrier is
 * turned on in team_open().
 */
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	/* No mode selected yet; install the no-op ops. */
	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}
+
/*
 * team_uninit - ndo_uninit callback
 *
 * Removes all remaining ports, drops the current mode and unregisters
 * the built-in options.  Per-cpu stats are freed later by the
 * destructor (team_destructor()).
 */
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	/* _safe variant: team_port_del() unlinks entries while we walk. */
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}
+
/* Netdev destructor: free per-cpu stats, then the device itself. */
static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

/* ndo_open: the team device itself always has carrier when up. */
static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

/* ndo_stop: drop carrier when the team device goes down. */
static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}
+
/*
 * team_xmit - ndo_start_xmit callback
 *
 * Delegates transmission to the active mode's transmit op and accounts
 * the result in per-cpu stats.  Always returns NETDEV_TX_OK; a failed
 * mode transmit is counted as a drop, not requeued.
 *
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* tx_dropped is u32, updated without syncp protection. */
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
+
/*
 * team_change_rx_flags - propagate promisc/allmulti changes to all ports
 *
 * @change tells which flags flipped; the current dev->flags value gives
 * the direction (+1 ref on set, -1 on clear) for each port device.
 */
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}
+
/*
 * team_set_rx_mode - sync unicast/multicast address lists to all ports
 */
static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}
+
/*
 * team_set_mac_address - ndo_set_mac_address callback
 *
 * Updates the team device's MAC and lets the current mode (if it cares)
 * react per port via the port_change_mac op.
 */
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}
+
/*
 * team_change_mtu - ndo_change_mtu callback
 *
 * Applies @new_mtu to every port first; only on full success is the
 * team device's own MTU updated.  On a partial failure, ports already
 * changed are rolled back to the old MTU.
 */
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	/* Restore the previous MTU on every port changed so far. */
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}
+
/*
 * team_get_stats64 - ndo_get_stats64 callback
 *
 * Sums the per-cpu counters into @stats.  The 64-bit counters are read
 * under the u64_stats seqcount retry loop so 32-bit hosts get a
 * consistent snapshot; the u32 drop counters are read without it.
 */
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	return stats;
}
+
/*
 * team_vlan_rx_add_vid - ndo_vlan_rx_add_vid callback
 *
 * Registers vlan id @vid on every port, rolling back on partial
 * failure just like team_change_mtu().
 */
static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}
+
/*
 * team_vlan_rx_kill_vid - ndo_vlan_rx_kill_vid callback
 *
 * Drops vlan id @vid from every port.  Deletion cannot fail, so a plain
 * forward RCU walk suffices here.
 */
static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}
+
/* ndo_add_slave: serialize port addition under the team lock. */
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

/* ndo_del_slave: serialize port removal under the team lock. */
static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}
+
/*
 * team_fix_features - ndo_fix_features callback
 *
 * Computes the feature set the team device may advertise as the
 * intersection-style combination of all port features, the same way
 * bonding/bridging do via netdev_increment_features().
 */
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}
+
/* Net device operations of the team master device. */
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
/*
 * team_setup - rtnl_link_ops setup callback
 *
 * Basic Ethernet setup plus team-specific tweaks: no qdisc queue of its
 * own, lockless TX, and vlan offload advertised on behalf of ports.
 */
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       int err;
+
+       if (tb[IFLA_ADDRESS] == NULL)
+               random_ether_addr(dev->dev_addr);
+
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
+       return 0;
+}
+
/*
 * team_validate - rtnl_link_ops validate callback
 *
 * Only checks that a user-supplied hardware address, if any, is a
 * well-formed unicast Ethernet address.
 */
static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
+
/* rtnetlink glue: "team" link type registration. */
static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct team),
	.setup		= team_setup,
	.newlink	= team_newlink,
	.validate	= team_validate,
};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
/* Generic netlink family used for team control and events. */
static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

/* Top-level attribute policy for all team genl commands. */
static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
};

/* Policy for the per-option nested attributes (OPTIONS_SET). */
static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA] = {
		.type = NLA_BINARY,
		.len = TEAM_STRING_MAX_LEN,
	},
};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+                         &team_nl_family, 0, TEAM_CMD_NOOP);
+       if (IS_ERR(hdr)) {
+               err = PTR_ERR(hdr);
+               goto err_msg_put;
+       }
+
+       genlmsg_end(msg, hdr);
+
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+       nlmsg_free(msg);
+
+       return err;
+}
+
/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	/* Comparing netdev_ops identifies "is this a team device". */
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	/* Returned with team->lock held and a reference on team->dev. */
	mutex_lock(&team->lock);
	return team;
}

/* Counterpart of team_nl_team_get(): unlock and drop the dev ref. */
static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
+
/*
 * team_nl_send_generic - build a reply with @fill_func and unicast it
 * to the requester.  The skb is consumed by genlmsg_unicast() on
 * success and freed here on fill failure.
 */
static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+                                           u32 pid, u32 seq, int flags,
+                                           struct team *team,
+                                           struct team_option *changed_option)
+{
+       struct nlattr *option_list;
+       void *hdr;
+       struct team_option *option;
+
+       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+                         TEAM_CMD_OPTIONS_GET);
+       if (IS_ERR(hdr))
+               return PTR_ERR(hdr);
+
+       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+       if (!option_list)
+               return -EMSGSIZE;
+
+       list_for_each_entry(option, &team->option_list, list) {
+               struct nlattr *option_item;
+               long arg;
+
+               option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+               if (!option_item)
+                       goto nla_put_failure;
+               NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+               if (option == changed_option)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+               switch (option->type) {
+               case TEAM_OPTION_TYPE_U32:
+                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+                       team_option_get(team, option, &arg);
+                       NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+                       break;
+               case TEAM_OPTION_TYPE_STRING:
+                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+                       team_option_get(team, option, &arg);
+                       NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+                                      (char *) arg);
+                       break;
+               default:
+                       BUG();
+               }
+               nla_nest_end(skb, option_item);
+       }
+
+       nla_nest_end(skb, option_list);
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+                                   struct genl_info *info, int flags,
+                                   struct team *team)
+{
+       return team_nl_fill_options_get_changed(skb, info->snd_pid,
+                                               info->snd_seq, NLM_F_ACK,
+                                               team, NULL);
+}
+
/*
 * team_nl_cmd_options_get - TEAM_CMD_OPTIONS_GET handler
 *
 * Looks the team up (taking team->lock and a dev ref), sends the
 * option dump, and releases both.
 */
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get);

	team_nl_team_put(team);

	return err;
}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct team *team;
+       int err = 0;
+       int i;
+       struct nlattr *nl_option;
+
+       team = team_nl_team_get(info);
+       if (!team)
+               return -EINVAL;
+
+       err = -EINVAL;
+       if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+               err = -EINVAL;
+               goto team_put;
+       }
+
+       nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+               struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+               enum team_option_type opt_type;
+               struct team_option *option;
+               char *opt_name;
+               bool opt_found = false;
+
+               if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+                       err = -EINVAL;
+                       goto team_put;
+               }
+               err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+                                      nl_option, team_nl_option_policy);
+               if (err)
+                       goto team_put;
+               if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+                   !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+                   !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+                       err = -EINVAL;
+                       goto team_put;
+               }
+               switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+               case NLA_U32:
+                       opt_type = TEAM_OPTION_TYPE_U32;
+                       break;
+               case NLA_STRING:
+                       opt_type = TEAM_OPTION_TYPE_STRING;
+                       break;
+               default:
+                       goto team_put;
+               }
+
+               opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+               list_for_each_entry(option, &team->option_list, list) {
+                       long arg;
+                       struct nlattr *opt_data_attr;
+
+                       if (option->type != opt_type ||
+                           strcmp(option->name, opt_name))
+                               continue;
+                       opt_found = true;
+                       opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+                       switch (opt_type) {
+                       case TEAM_OPTION_TYPE_U32:
+                               arg = nla_get_u32(opt_data_attr);
+                               break;
+                       case TEAM_OPTION_TYPE_STRING:
+                               arg = (long) nla_data(opt_data_attr);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       err = team_option_set(team, option, &arg);
+                       if (err)
+                               goto team_put;
+               }
+               if (!opt_found) {
+                       err = -ENOENT;
+                       goto team_put;
+               }
+       }
+
+team_put:
+       team_nl_team_put(team);
+
+       return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+                                             u32 pid, u32 seq, int flags,
+                                             struct team *team,
+                                             struct team_port *changed_port)
+{
+       struct nlattr *port_list;
+       void *hdr;
+       struct team_port *port;
+
+       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+                         TEAM_CMD_PORT_LIST_GET);
+       if (IS_ERR(hdr))
+               return PTR_ERR(hdr);
+
+       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+       if (!port_list)
+               return -EMSGSIZE;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               struct nlattr *port_item;
+
+               port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+               if (!port_item)
+                       goto nla_put_failure;
+               NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+               if (port == changed_port)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+               if (port->linkup)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+               NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+               NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+               nla_nest_end(skb, port_item);
+       }
+
+       nla_nest_end(skb, port_list);
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+                                     struct genl_info *info, int flags,
+                                     struct team *team)
+{
+       return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+                                                 info->snd_seq, NLM_F_ACK,
+                                                 team, NULL);
+}
+
/*
 * team_nl_cmd_port_list_get - TEAM_CMD_PORT_LIST_GET handler
 *
 * Looks the team up (taking team->lock and a dev ref), sends the port
 * dump, and releases both.
 */
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);

	team_nl_team_put(team);

	return err;
}
+
/* Generic netlink command table; SET/GET commands need CAP_NET_ADMIN. */
static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

/* Multicast group used to broadcast option/port change events. */
static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
+
/*
 * team_nl_send_event_options_get - multicast an options-change event
 *
 * Builds an OPTIONS_GET message flagging @changed_option and sends it
 * to the change-event multicast group of the team's netns.
 */
static int team_nl_send_event_options_get(struct team *team,
					  struct team_option *changed_option)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
					       changed_option);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}
+
/*
 * team_nl_send_event_port_list_get - multicast a port-change event
 *
 * Builds a PORT_LIST_GET message flagging @port as changed and sends
 * it to the change-event multicast group of the team's netns.
 */
static int team_nl_send_event_port_list_get(struct team_port *port)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(port->team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
						 port->team, port);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}
+
/*
 * team_nl_init - register the genl family, ops and event mcast group.
 * Unregisters the family again if group registration fails.
 */
static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

/* Tear down the genl family (also removes its ops and groups). */
static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
+
+
+/******************
+ * Change checkers
+ ******************/
+
/*
 * __team_options_change_check - broadcast an option change event.
 * Called with team->lock held; failure is only logged, not propagated.
 */
static void __team_options_change_check(struct team *team,
					struct team_option *changed_option)
{
	int err;

	err = team_nl_send_event_options_get(team, changed_option);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}
+
/* rtnl lock is held */
/*
 * __team_port_change_check - record a port link-state transition
 *
 * No-op if the state did not change.  On link up, speed/duplex are
 * read via ethtool; on link down (or if ethtool fails) they are
 * zeroed — note the deliberate fall-through into the zeroing code
 * when linkup is false.  A port-change event is broadcast either way.
 */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (port->linkup == linkup)
		return;

	port->linkup = linkup;
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->speed = ethtool_cmd_speed(&ecmd);
			port->duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->speed = 0;
	port->duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);

}
+
/* Locked wrapper around __team_port_change_check(). */
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+                            unsigned long event, void *ptr)
+{
+       struct net_device *dev = (struct net_device *) ptr;
+       struct team_port *port;
+
+       port = team_port_get_rtnl(dev);
+       if (!port)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (netif_carrier_ok(dev))
+                       team_port_change_check(port, true);
+       case NETDEV_DOWN:
+               team_port_change_check(port, false);
+       case NETDEV_CHANGE:
+               if (netif_running(port->dev))
+                       team_port_change_check(port,
+                                              !!netif_carrier_ok(port->dev));
+               break;
+       case NETDEV_UNREGISTER:
+               team_del_slave(port->team->dev, dev);
+               break;
+       case NETDEV_FEAT_CHANGE:
+               team_compute_features(port->team);
+               break;
+       case NETDEV_CHANGEMTU:
+               /* Forbid to change mtu of underlaying device */
+               return NOTIFY_BAD;
+       case NETDEV_PRE_TYPE_CHANGE:
+               /* Forbid to change type of underlaying device */
+               return NOTIFY_BAD;
+       }
+       return NOTIFY_DONE;
+}
+
/* Netdev notifier registered in team_module_init(). */
static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
/*
 * team_module_init - register notifier, rtnl link type and genl family.
 * Unwinds in reverse order on failure.
 */
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

/* Module exit: tear down in reverse registration order. */
static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644 (file)
index 0000000..f4d960e
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+       struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+       return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+                                     struct sk_buff *skb) {
+       struct team_port *active_port;
+
+       active_port = rcu_dereference(ab_priv(team)->active_port);
+       if (active_port != port)
+               return RX_HANDLER_EXACT;
+       return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct team_port *active_port;
+
+       active_port = rcu_dereference(ab_priv(team)->active_port);
+       if (unlikely(!active_port))
+               goto drop;
+       skb->dev = active_port->dev;
+       if (dev_queue_xmit(skb))
+               return false;
+       return true;
+
+drop:
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+       if (ab_priv(team)->active_port == port)
+               RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+       u32 *ifindex = arg;
+
+       *ifindex = 0;
+       if (ab_priv(team)->active_port)
+               *ifindex = ab_priv(team)->active_port->dev->ifindex;
+       return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+       u32 *ifindex = arg;
+       struct team_port *port;
+
+       list_for_each_entry_rcu(port, &team->port_list, list) {
+               if (port->dev->ifindex == *ifindex) {
+                       rcu_assign_pointer(ab_priv(team)->active_port, port);
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+       {
+               .name = "activeport",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = ab_active_port_get,
+               .setter = ab_active_port_set,
+       },
+};
+
+int ab_init(struct team *team)
+{
+       return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+void ab_exit(struct team *team)
+{
+       team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+       .init                   = ab_init,
+       .exit                   = ab_exit,
+       .receive                = ab_receive,
+       .transmit               = ab_transmit,
+       .port_leave             = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+       .kind           = "activebackup",
+       .owner          = THIS_MODULE,
+       .priv_size      = sizeof(struct ab_priv),
+       .ops            = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+       return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+       team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644 (file)
index 0000000..a0e8f80
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+       unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+       return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+                                            struct team_port *port)
+{
+       struct team_port *cur;
+
+       if (port->linkup)
+               return port;
+       cur = port;
+       list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+               if (cur->linkup)
+                       return cur;
+       list_for_each_entry_rcu(cur, &team->port_list, list) {
+               if (cur == port)
+                       break;
+               if (cur->linkup)
+                       return cur;
+       }
+       return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct team_port *port;
+       int port_index;
+
+       port_index = rr_priv(team)->sent_packets++ % team->port_count;
+       port = team_get_port_by_index_rcu(team, port_index);
+       port = __get_first_port_up(team, port);
+       if (unlikely(!port))
+               goto drop;
+       skb->dev = port->dev;
+       if (dev_queue_xmit(skb))
+               return false;
+       return true;
+
+drop:
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+       return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+       team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+       .transmit               = rr_transmit,
+       .port_enter             = rr_port_enter,
+       .port_change_mac        = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+       .kind           = "roundrobin",
+       .owner          = THIS_MODULE,
+       .priv_size      = sizeof(struct rr_priv),
+       .ops            = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+       return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+       team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
index 7bea9c65119e339326155c45e4af6e268bda0c6c..93c5d72711b023be9623e85b3fc9044d50cfef59 100644 (file)
@@ -123,7 +123,7 @@ struct tun_struct {
        gid_t                   group;
 
        struct net_device       *dev;
-       u32                     set_features;
+       netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
                          NETIF_F_TSO6|NETIF_F_UFO)
        struct fasync_struct    *fasync;
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
@@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
  * privs required. */
 static int set_offload(struct tun_struct *tun, unsigned long arg)
 {
-       u32 features = 0;
+       netdev_features_t features = 0;
 
        if (arg & TUN_F_CSUM) {
                features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
        switch (tun->flags & TUN_TYPE_MASK) {
        case TUN_TUN_DEV:
-               strcpy(info->bus_info, "tun");
+               strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
                break;
        case TUN_TAP_DEV:
-               strcpy(info->bus_info, "tap");
+               strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
                break;
        }
 }
index e81e22e3d1d2fcf84be504403002b52d492a01d8..dbdca225b846aa06bd4af8cccbb53bf2f18c9814 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 
-#define DRIVER_VERSION "26-Sep-2011"
+#define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
 
 /* ASIX AX8817X based USB 2.0 Ethernet Devices */
 #define MARVELL_CTRL_TXDELAY   0x0002
 #define MARVELL_CTRL_RXDELAY   0x0080
 
-#define        PHY_MODE_RTL8211CL      0x0004
+#define        PHY_MODE_RTL8211CL      0x000C
 
 /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
 struct asix_data {
@@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev)
 {
        int phy_reg;
        u32 phy_id;
+       int i;
 
-       phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
-       if (phy_reg < 0)
+       /* Poll for the rare case the FW or phy isn't ready yet.  */
+       for (i = 0; i < 100; i++) {
+               phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+               if (phy_reg != 0 && phy_reg != 0xFFFF)
+                       break;
+               mdelay(1);
+       }
+
+       if (phy_reg <= 0 || phy_reg == 0xFFFF)
                return 0;
 
        phy_id = (phy_reg & 0xffff) << 16;
@@ -681,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        }
        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
        wolinfo->wolopts = 0;
+       if (opt & AX_MONITOR_LINK)
+               wolinfo->wolopts |= WAKE_PHY;
+       if (opt & AX_MONITOR_MAGIC)
+               wolinfo->wolopts |= WAKE_MAGIC;
 }
 
 static int
@@ -1075,7 +1087,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
 
 static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-       int ret;
+       int ret, embd_phy;
        struct asix_data *data = (struct asix_data *)&dev->data;
        u8 buf[ETH_ALEN];
        u32 phyid;
@@ -1100,16 +1112,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->mii.reg_num_mask = 0x1f;
        dev->mii.phy_id = asix_get_phy_addr(dev);
 
-       phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
-
        dev->net->netdev_ops = &ax88772_netdev_ops;
        dev->net->ethtool_ops = &ax88772_ethtool_ops;
 
-       ret = ax88772_reset(dev);
+       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+
+       /* Reset the PHY to normal operation mode */
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+       if (ret < 0) {
+               dbg("Select PHY #1 failed: %d", ret);
+               return ret;
+       }
+
+       ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
+       if (ret < 0)
+               return ret;
+
+       msleep(150);
+
+       ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
        if (ret < 0)
                return ret;
 
+       msleep(150);
+
+       ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+
+       /* Read PHYID register *AFTER* the PHY was reset properly */
+       phyid = asix_get_phyid(dev);
+       dbg("PHYID=0x%08x", phyid);
+
        /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
        if (dev->driver_info->flags & FLAG_FRAMING_AX) {
                /* hard_mtu  is still the default - the device does not support
@@ -1220,6 +1252,7 @@ static int ax88178_reset(struct usbnet *dev)
        __le16 eeprom;
        u8 status;
        int gpio0 = 0;
+       u32 phyid;
 
        asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
        dbg("GPIO Status: 0x%04x", status);
@@ -1235,12 +1268,13 @@ static int ax88178_reset(struct usbnet *dev)
                data->ledmode = 0;
                gpio0 = 1;
        } else {
-               data->phymode = le16_to_cpu(eeprom) & 7;
+               data->phymode = le16_to_cpu(eeprom) & 0x7F;
                data->ledmode = le16_to_cpu(eeprom) >> 8;
                gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
        }
        dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
 
+       /* Power up external GigaPHY through AX88178 GPIO pin */
        asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
        if ((le16_to_cpu(eeprom) >> 8) != 1) {
                asix_write_gpio(dev, 0x003c, 30);
@@ -1252,6 +1286,13 @@ static int ax88178_reset(struct usbnet *dev)
                asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
        }
 
+       /* Read PHYID register *AFTER* powering up PHY */
+       phyid = asix_get_phyid(dev);
+       dbg("PHYID=0x%08x", phyid);
+
+       /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
+       asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
+
        asix_sw_reset(dev, 0);
        msleep(150);
 
@@ -1396,7 +1437,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        int ret;
        u8 buf[ETH_ALEN];
-       u32 phyid;
        struct asix_data *data = (struct asix_data *)&dev->data;
 
        data->eeprom_len = AX88772_EEPROM_LEN;
@@ -1423,12 +1463,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->netdev_ops = &ax88178_netdev_ops;
        dev->net->ethtool_ops = &ax88178_ethtool_ops;
 
-       phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
+       /* Blink LEDs so users know the driver saw the dongle */
+       asix_sw_reset(dev, 0);
+       msleep(150);
 
-       ret = ax88178_reset(dev);
-       if (ret < 0)
-               return ret;
+       asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+       msleep(150);
 
        /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
        if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -1619,6 +1659,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE (0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index a60d0069cc458c0f48e006ec7648e8a2daa24d2f..331e44056f5ae7fa05bf6c7661960eabe737d943 100644 (file)
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        err = usb_submit_urb(req, gfp_flags);
        if (unlikely(err)) {
                dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
-               netdev_free_page(dev, page);
+               put_page(page);
        }
        return err;
 }
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
        dev->stats.rx_errors++;
 resubmit:
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               rx_submit(pnd, req, GFP_ATOMIC);
+               rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
        for (i = 0; i < rxq_size; i++) {
                struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-               if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+               if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
                        usbpn_close(dev);
                        return -ENOMEM;
                }
index c924ea2bce07a4c06aa8d6ab5d9c23620b446915..99ed6eb4dfaf6b8b1153dbe9bfcb4e5917500986 100644 (file)
@@ -567,7 +567,7 @@ static const struct usb_device_id   products [] = {
 {
        USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-       .driver_info = (unsigned long)&wwan_info,
+       .driver_info = 0,
 },
 
 /*
index f06fb78383a1b865b22c989661a1795f4d7f0368..009dd0f185355b4aa71eacdc1157f904565d248f 100644 (file)
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        int temp;
        u8 iface_no;
 
-       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return -ENODEV;
 
-       memset(ctx, 0, sizeof(*ctx));
-
        init_timer(&ctx->tx_timer);
        spin_lock_init(&ctx->mtx);
        ctx->netdev = dev->net;
index d43db32f94781f64e57fc4285c7c398e9287e885..9c26c6390d69a72abffbf47dc91d9aea96ec1ba0 100644 (file)
@@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        }
 
        frame = (struct vl600_frame_hdr *) buf->data;
-       /* NOTE: Should check that frame->magic == 0x53544448?
-        * Otherwise if we receive garbage at the beginning of the frame
-        * we may end up allocating a huge buffer and saving all the
-        * future incoming data into it.  */
+       /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48),
+        * otherwise we may run out of memory with a bad packet */
+       if (ntohl(frame->magic) != 0x53544448 &&
+                       ntohl(frame->magic) != 0x44544d48)
+               goto error;
 
        if (buf->len < sizeof(*frame) ||
                        buf->len != le32_to_cpup(&frame->len)) {
@@ -296,6 +297,11 @@ encapsulate:
         * overwrite the remaining fields.
         */
        packet = (struct vl600_pkt_hdr *) skb->data;
+       /* The VL600 wants IPv6 packets to have an IPv4 ethertype
+        * Since this modem only supports IPv4 and IPv6, just set all
+        * frames to 0x0800 (ETH_P_IP)
+        */
+       packet->h_proto = htons(ETH_P_IP);
        memset(&packet->dummy, 0, sizeof(packet->dummy));
        packet->len = cpu_to_le32(orig_len);
 
@@ -308,21 +314,12 @@ encapsulate:
        if (skb->len < full_len) /* Pad */
                skb_put(skb, full_len - skb->len);
 
-       /* The VL600 wants IPv6 packets to have an IPv4 ethertype
-        * Check if this is an IPv6 packet, and set the ethertype
-        * to 0x800
-        */
-       if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
-               skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
-               skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
-       }
-
        return skb;
 }
 
 static const struct driver_info        vl600_info = {
        .description    = "LG VL600 modem",
-       .flags          = FLAG_ETHER | FLAG_RX_ASSEMBLE,
+       .flags          = FLAG_RX_ASSEMBLE | FLAG_WWAN,
        .bind           = vl600_bind,
        .unbind         = vl600_unbind,
        .status         = usbnet_cdc_status,
index 769f5090bda1300669dd3ad464e305408d92fb5f..5d99b8cacd7d01065007f625459edc2329e46e8a 100644 (file)
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
 #define        BMSR_MEDIA      (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
                        BMSR_100FULL | BMSR_ANEGCAPABLE)
 
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
 static char *devid;
 
 static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
        for (i = 0; i < REG_TIMEOUT; i++) {
                get_registers(pegasus, EthCtrl1, 1, &data);
                if (~data & 0x08) {
-                       if (loopback & 1)
+                       if (loopback)
                                break;
                        if (mii_mode && (pegasus->features & HAS_HOME_PNA))
                                set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
                data[1] |= 0x10;        /* set 100 Mbps */
        if (mii_mode)
                data[1] = 0;
-       data[2] = (loopback & 1) ? 0x09 : 0x01;
+       data[2] = loopback ? 0x09 : 0x01;
 
        memcpy(pegasus->eth_regs, data, sizeof(data));
        ret = set_registers(pegasus, EthCtrl0, 3, data);
index 22a7cf951e728132b6f55d0d99584fc50e374eaa..0d5da82f0ff71c53ba89c70dd8d4306a844e7c23 100644 (file)
@@ -51,6 +51,7 @@
 #define USB_VENDOR_ID_SMSC             (0x0424)
 #define USB_PRODUCT_ID_LAN7500         (0x7500)
 #define USB_PRODUCT_ID_LAN7505         (0x7505)
+#define RXW_PADDING                    2
 
 #define check_warn(ret, fmt, args...) \
        ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -75,7 +76,7 @@ struct usb_context {
        struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
@@ -727,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct usbnet *dev = netdev_priv(netdev);
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1088,13 +1090,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
                le32_to_cpus(&rx_cmd_b);
-               skb_pull(skb, 4 + NET_IP_ALIGN);
+               skb_pull(skb, 4 + RXW_PADDING);
 
                packet = skb->data;
 
                /* get the packet length */
-               size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN;
-               align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+               size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
+               align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
 
                if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
                        netif_dbg(dev, rx_err, dev->net,
index eff67678c5a6a582aced31aa27c3dcc569aee768..db217ad66f26d05cd7ee531acb17c51d394e630b 100644 (file)
@@ -59,7 +59,7 @@ struct usb_context {
        struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
 }
 
 /* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 read_buf;
index ef883e97cee08a557fd3dcc2546535c1ca487372..49f4667e1fa3c20342b1ca18240a93ece55f9016 100644 (file)
@@ -27,8 +27,8 @@
 
 struct veth_net_stats {
        u64                     rx_packets;
-       u64                     tx_packets;
        u64                     rx_bytes;
+       u64                     tx_packets;
        u64                     tx_bytes;
        u64                     rx_dropped;
        struct u64_stats_sync   syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
        dev->features |= NETIF_F_LLTX;
        dev->destructor = veth_dev_free;
 
-       dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
 }
 
 /*
index 6ee8410443c46312ca62810199dc14ffbaf53966..2055386eda58d4bd21a73eaaebc6b839f3012d7f 100644 (file)
@@ -30,7 +30,7 @@
 static int napi_weight = 128;
 module_param(napi_weight, int, 0444);
 
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
 
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
 #define GOOD_COPY_LEN  128
 
 #define VIRTNET_SEND_COMMAND_SG_MAX    2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
        struct u64_stats_sync syncp;
@@ -439,7 +440,13 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
        return err;
 }
 
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
 static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
        int err;
@@ -501,7 +508,7 @@ static void refill_work(struct work_struct *work)
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
-               schedule_delayed_work(&vi->refill, HZ/2);
+               queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +527,7 @@ again:
 
        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+                       queue_delayed_work(system_nrt_wq, &vi->refill, 0);
        }
 
        /* Out of packets? */
@@ -699,6 +706,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
        }
 
        tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +727,10 @@ static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
+       /* Make sure we have some buffers: if oom use wq. */
+       if (!try_fill_recv(vi, GFP_KERNEL))
+               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
        virtnet_napi_enable(vi);
        return 0;
 }
@@ -772,6 +784,8 @@ static int virtnet_close(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
+       /* Make sure refill_work doesn't re-enable napi! */
+       cancel_delayed_work_sync(&vi->refill);
        napi_disable(&vi->napi);
 
        return 0;
@@ -853,7 +867,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        kfree(buf);
 }
 
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -863,9 +877,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+       return 0;
 }
 
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -875,6 +890,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+       return 0;
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +905,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
 
 }
 
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+                               struct ethtool_drvinfo *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtio_device *vdev = vi->vdev;
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
+       .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
 };
@@ -1082,7 +1112,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
        unregister_netdev(dev);
-       cancel_delayed_work_sync(&vi->refill);
 free_vqs:
        vdev->config->del_vqs(vdev);
 free_stats:
@@ -1121,9 +1150,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
 
-
        unregister_netdev(vi->dev);
-       cancel_delayed_work_sync(&vi->refill);
 
        /* Free unused buffers in both send and recv, if any. */
        free_unused_bufs(vi);
index d96bfb1ac20b161f10b19e362b5ad934110072cd..de7fc345148a889a772c9b208dd56137519fe7e1 100644 (file)
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
@@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
                get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
                for (i = 0; i < rssConf->indTableSize; i++)
-                       rssConf->indTable[i] = i % adapter->num_rx_queues;
+                       rssConf->indTable[i] = ethtool_rxfh_indir_default(
+                               i, adapter->num_rx_queues);
 
                devRead->rssConfDesc.confVer = 1;
                devRead->rssConfDesc.confLen = sizeof(*rssConf);
index e662cbc8bfbda34c8f158fd89ffb82ecfacebf61..a3eb75a62ea941ba46dd79bdedaf32e37d9cec1f 100644 (file)
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
-       drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
 
        strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
                sizeof(drvinfo->version));
-       drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-       drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
        }
 }
 
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
                if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
 }
 
 #ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+       struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+       struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+       return rssConf->indTableSize;
+}
+
 static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
-                     struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;
-       unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+       unsigned int n = rssConf->indTableSize;
 
-       p->size = rssConf->indTableSize;
        while (n--)
-               p->ring_index[n] = rssConf->indTable[n];
+               p[n] = rssConf->indTable[n];
        return 0;
 
 }
 
 static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
-                     const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
 {
        unsigned int i;
        unsigned long flags;
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
-       if (p->size != rssConf->indTableSize)
-               return -EINVAL;
-       for (i = 0; i < rssConf->indTableSize; i++) {
-               /*
-                * Return with error code if any of the queue indices
-                * is out of range
-                */
-               if (p->ring_index[i] < 0 ||
-                   p->ring_index[i] >= adapter->num_rx_queues)
-                       return -EINVAL;
-       }
-
        for (i = 0; i < rssConf->indTableSize; i++)
-               rssConf->indTable[i] = p->ring_index[i];
+               rssConf->indTable[i] = p[i];
 
        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
        .set_ringparam     = vmxnet3_set_ringparam,
        .get_rxnfc         = vmxnet3_get_rxnfc,
 #ifdef VMXNET3_RSS
+       .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
        .get_rxfh_indir    = vmxnet3_get_rss_indir,
        .set_rxfh_indir    = vmxnet3_set_rss_indir,
 #endif
index b18eac1dccaa152f6a6dd1c8f0ae63335a710b8a..ed54797db1916d717fe3141559bf95bca2013547 100644 (file)
@@ -401,7 +401,7 @@ void
 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
 
 int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
index 783168cce077273e3d9b33e5a218af1677d7f11c..d43f4efd3e07abc6d62fff0343cfd46cec609097 100644 (file)
@@ -155,7 +155,7 @@ static int  emancipate( struct net_device * );
 static const char  version[] =
        "Granch SBNI12 driver ver 5.0.1  Jun 22 2001  Denis I.Timofeev.\n";
 
-static int  skip_pci_probe     __initdata = 0;
+static bool skip_pci_probe     __initdata = false;
 static int  scandone   __initdata = 0;
 static int  num                __initdata = 0;
 
index 0b4fd05e1508e2519508db7f41b7dea318e145f1..4f7748478984750fa4a9872341f3db5f36d802a8 100644 (file)
@@ -362,7 +362,7 @@ static int io=0x238;
 static int txdma=1;
 static int rxdma=3;
 static int irq=5;
-static int slow=0;
+static bool slow=false;
 
 module_param(io, int, 0);
 MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
index 4b9ecb20deec28f5710ba4f82c78502be1b23d50..f20886ade1ccdf3147253e90f397263066eab431 100644 (file)
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
 {
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_msg_hdr *tx_msg;
-       bool try_head = 0;
+       bool try_head = false;
        BUG_ON(i2400m->tx_msg != NULL);
        /*
         * In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
        else if (tx_msg == TAIL_FULL) {
                i2400m_tx_skip_tail(i2400m);
                d_printf(2, dev, "new TX message: tail full, trying head\n");
-               try_head = 1;
+               try_head = true;
                goto try_head;
        }
        memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
        unsigned long flags;
        size_t padded_len;
        void *ptr;
-       bool try_head = 0;
+       bool try_head = false;
        unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
                || pl_type == I2400M_PT_RESET_COLD;
 
@@ -771,7 +771,7 @@ try_new:
                d_printf(2, dev, "pl append: tail full\n");
                i2400m_tx_close(i2400m);
                i2400m_tx_skip_tail(i2400m);
-               try_head = 1;
+               try_head = true;
                goto try_new;
        } else if (ptr == NULL) {       /* All full */
                result = -ENOSPC;
index ac357acfb3e9a569be66ecb671c5b83b06b1f586..99ef81b3d5a515d6cc4ae6f176514ceca3e2942d 100644 (file)
@@ -177,7 +177,6 @@ retry:
 static
 int i2400mu_txd(void *_i2400mu)
 {
-       int result = 0;
        struct i2400mu *i2400mu = _i2400mu;
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
                /* Yeah, we ignore errors ... not much we can do */
                i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
                i2400m_tx_msg_sent(i2400m);     /* ack it, advance the FIFO */
-               if (result < 0)
-                       break;
        }
 
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        i2400mu->tx_kthread = NULL;
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 
-       d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
-       return result;
+       d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+       return 0;
 }
 
 
index ac1176a4f4656fd31ed143a5638a53627a8b94ba..1c008c61b95ce8113d8ad230ee4e48aad27992b7 100644 (file)
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
        emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
        emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
        emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
-       emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+       emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
        emmh32_final(&context->seed, (u8*)&mic->mic);
 
        /*    New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
                emmh32_update(&context->seed, eth->da, ETH_ALEN*2); 
                emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap)); 
                emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));        
-               emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);     
+               emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);  
                //Calculate MIC
                emmh32_final(&context->seed, digest);
        
index e564e585b22144c9e02e21c31842d268d5279dda..c2b2518c2ecd30732c900ab3490d01765b0c35b9 100644 (file)
@@ -914,7 +914,7 @@ enum ath5k_dmasize {
  */
 
 #define AR5K_KEYCACHE_SIZE     8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
 
 /***********************\
  HW RELATED DEFINITIONS
index 178a4dd1031672a5f711ec54bee1955033288537..d366dadcf86e6a47c6a48261bd2a480a803d5cac 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
-static int modparam_all_channels;
+static bool modparam_all_channels;
 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
 
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
 module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
 MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
 
index 4aed3a3ab109cefcddc6114966e7c04fd0964b90..250db40b751d1780b0c7937cb53c6de016973bdd 100644 (file)
@@ -1159,7 +1159,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
         */
        if (fast && (ah->ah_radio != AR5K_RF2413) &&
        (ah->ah_radio != AR5K_RF5413))
-               fast = 0;
+               fast = false;
 
        /* Disable sleep clock operation
         * to avoid register access delay on certain
@@ -1185,7 +1185,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        if (ret && fast) {
                ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                        "DMA didn't stop, falling back to normal reset\n");
-               fast = 0;
+               fast = false;
                /* Non fatal, just continue with
                 * normal reset */
                ret = 0;
index 368ecbd172a3d03225d7d7ccfc6a19d21aee61f9..7f55be3092d1c63698bc2f9d83d88d6ce2769868 100644 (file)
@@ -430,7 +430,7 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
                        ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
                                   "capabilities (%d) - assuming P2P not "
                                   "supported\n", ret);
-                       ar->p2p = 0;
+                       ar->p2p = false;
                }
        }
 
index f57084ec49e755f5b426afd964eef0ca53d109b9..619b95d764ff33ea0adceabd8a15cb5c93815f1e 100644 (file)
@@ -383,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
        if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
            ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
            (eep->baseEepHeader.pwdclkind == 0))
-               ah->need_an_top2_fixup = 1;
+               ah->need_an_top2_fixup = true;
 
        if ((common->bus_ops->ath_bus_type == ATH_USB) &&
            (AR_SREV_9280(ah)))
index 6e3d8384e081effb2422e4410ea1db1eb192c7d4..e267c92dbfb8e22de58a0156771a461ccfada95a 100644 (file)
@@ -1797,6 +1797,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_node *an = (struct ath_node *) sta->drv_priv;
 
+       if (!(sc->sc_flags & SC_OP_TXAGGR))
+               return;
+
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                an->sleeping = true;
index c8fc180f5218cbc31ff98bbe55cfa514d9cf1b0f..3182408ffe35abcc08c075d8f950cf433d1ca9cf 100644 (file)
@@ -1964,7 +1964,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
-               txq->stopped = 1;
+               txq->stopped = true;
        }
 
        ath_tx_start_dma(sc, skb, txctl);
@@ -2020,7 +2020,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
                if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
                        ieee80211_wake_queue(sc->hw, q);
-                       txq->stopped = 0;
+                       txq->stopped = false;
                }
        }
 
index f06e0695d412fb9631117d6261608834a4376eb9..db774212161bc820225fb9f14564302be55cc3be 100644 (file)
@@ -48,7 +48,7 @@
 #include "carl9170.h"
 #include "cmd.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
 
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
 
        mutex_lock(&ar->mutex);
        if (IS_ACCEPTING_CMD(ar)) {
-               rcu_assign_pointer(ar->beacon_iter, NULL);
+               RCU_INIT_POINTER(ar->beacon_iter, NULL);
 
                carl9170_led_set_state(ar, 0);
 
@@ -678,7 +678,7 @@ unlock:
                vif_priv->active = false;
                bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
                ar->vifs--;
-               rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+               RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
                list_del_rcu(&vif_priv->list);
                mutex_unlock(&ar->mutex);
                synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
        WARN_ON(vif_priv->enable_beacon);
        vif_priv->enable_beacon = false;
        list_del_rcu(&vif_priv->list);
-       rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+       RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
 
        if (vif == main_vif) {
                rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
                }
 
                for (i = 0; i < CARL9170_NUM_TID; i++)
-                       rcu_assign_pointer(sta_info->agg[i], NULL);
+                       RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
                sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
                sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
                        struct carl9170_sta_tid *tid_info;
 
                        tid_info = rcu_dereference(sta_info->agg[i]);
-                       rcu_assign_pointer(sta_info->agg[i], NULL);
+                       RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
                        if (!tid_info)
                                continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                        spin_unlock_bh(&ar->tx_ampdu_list_lock);
                }
 
-               rcu_assign_pointer(sta_info->agg[tid], NULL);
+               RCU_INIT_POINTER(sta_info->agg[tid], NULL);
                rcu_read_unlock();
 
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
index 56d37dc967aae08be1199aa7280c4fdbb9e90a0e..b5f1b91002bbe609cf2a1d97e127d52f7baf32e4 100644 (file)
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
-               ring->tx = 1;
+               ring->tx = true;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 {
        u64 orig_mask = mask;
-       bool fallback = 0;
+       bool fallback = false;
        int err;
 
        /* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
                }
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
        memset(meta, 0, sizeof(*meta));
 
        meta->skb = skb;
-       meta->is_last_fragment = 1;
+       meta->is_last_fragment = true;
        priv_info->bouncebuffer = NULL;
 
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1468,7 +1468,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
                unsigned int skb_mapping = skb_get_queue_mapping(skb);
                ieee80211_stop_queue(dev->wl->hw, skb_mapping);
                dev->wl->tx_queue_stopped[skb_mapping] = 1;
-               ring->stopped = 1;
+               ring->stopped = true;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
@@ -1586,7 +1586,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
-               ring->stopped = 0;
+               ring->stopped = false;
        }
 
        if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
index a38c1c6446adafa8b2b9bd56d6ce0fa4f069438f..d79ab2a227e183c9acf3d29288c7c0618aa3ad4b 100644 (file)
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
        if (radio_enabled)
                turn_on = atomic_read(&led->state) != LED_OFF;
        else
-               turn_on = 0;
+               turn_on = false;
        if (turn_on == led->hw_state)
                return;
        led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
        if (sprom[led_index] == 0xFF) {
                /* There is no LED information in the SPROM
                 * for this LED. Hardcode it here. */
-               *activelow = 0;
+               *activelow = false;
                switch (led_index) {
                case 0:
                        *behaviour = B43_LED_ACTIVITY;
-                       *activelow = 1;
+                       *activelow = true;
                        if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
                                *behaviour = B43_LED_RADIO_ALL;
                        break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
        if (led->wl) {
                if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
                        b43_led_turn_on(dev, led->index, led->activelow);
-                       led->hw_state = 1;
+                       led->hw_state = true;
                        atomic_set(&led->state, 1);
                } else {
                        b43_led_turn_off(dev, led->index, led->activelow);
-                       led->hw_state = 0;
+                       led->hw_state = false;
                        atomic_set(&led->state, 0);
                }
        }
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
        led = &dev->wl->leds.led_tx;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
        led = &dev->wl->leds.led_rx;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
        led = &dev->wl->leds.led_assoc;
        if (led->wl) {
                b43_led_turn_off(dev, led->index, led->activelow);
-               led->hw_state = 0;
+               led->hw_state = false;
                atomic_set(&led->state, 0);
        }
 
index 4c82d582a524dbf512eb2ab8e2b14bba2540186e..916123a3d74e71053a908491f64eec4a0d978a76 100644 (file)
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
        const struct b43_rfatt *rfatt;
        const struct b43_bbatt *bbatt;
        u64 power_vector;
-       bool table_changed = 0;
+       bool table_changed = false;
 
        BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
        B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
                        lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
                                         | (val & 0x00FF);
                }
-               table_changed = 1;
+               table_changed = true;
        }
        if (table_changed) {
                /* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
        unsigned long now;
        unsigned long expire;
        struct b43_lo_calib *cal, *tmp;
-       bool current_item_expired = 0;
+       bool current_item_expired = false;
        bool hwpctl;
 
        if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
                if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
                    b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
                        B43_WARN_ON(current_item_expired);
-                       current_item_expired = 1;
+                       current_item_expired = true;
                }
                if (b43_debug(dev, B43_DBG_LO)) {
                        b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
index 989f654de006ae0e095e4c4f093e223812e41fae..1c6f19393efa72037b7369993bf7af463a7bb51a 100644 (file)
@@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
        B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
 
        if (ps_flags & B43_PS_ENABLED) {
-               hwps = 1;
+               hwps = true;
        } else if (ps_flags & B43_PS_DISABLED) {
-               hwps = 0;
+               hwps = false;
        } else {
                //TODO: If powersave is not off and FIXME is not set and we are not in adhoc
                //      and thus is not an AP and we are associated, set bit 25
        }
        if (ps_flags & B43_PS_AWAKE) {
-               awake = 1;
+               awake = true;
        } else if (ps_flags & B43_PS_ASLEEP) {
-               awake = 0;
+               awake = false;
        } else {
                //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
                //      or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
        }
 
 /* FIXME: For now we force awake-on and hwps-off */
-       hwps = 0;
-       awake = 1;
+       hwps = false;
+       awake = true;
 
        macctl = b43_read32(dev, B43_MMIO_MACCTL);
        if (hwps)
@@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
                return;
        if (dev->noisecalc.calculation_running)
                return;
-       dev->noisecalc.calculation_running = 1;
+       dev->noisecalc.calculation_running = true;
        dev->noisecalc.nr_samples = 0;
 
        b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
                        average -= 48;
 
                dev->stats.link_noise = average;
-               dev->noisecalc.calculation_running = 0;
+               dev->noisecalc.calculation_running = false;
                return;
        }
 generate_new:
@@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
                        b43_power_saving_ctl_bits(dev, 0);
        }
        if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-               dev->dfq_valid = 1;
+               dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
                b43_write32(dev, B43_MMIO_MACCMD,
                            b43_read32(dev, B43_MMIO_MACCMD)
                            | B43_MACCMD_DFQ_VALID);
-               dev->dfq_valid = 0;
+               dev->dfq_valid = false;
        }
 }
 
@@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
        unsigned int i, len, variable_len;
        const struct ieee80211_mgmt *bcn;
        const u8 *ie;
-       bool tim_found = 0;
+       bool tim_found = false;
        unsigned int rate;
        u16 ctl;
        int antenna;
@@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
                        /* A valid TIM is at least 4 bytes long. */
                        if (ie_len < 4)
                                break;
-                       tim_found = 1;
+                       tim_found = true;
 
                        tim_position = sizeof(struct b43_plcp_hdr6);
                        tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
        if (wl->beacon0_uploaded)
                return;
        b43_write_beacon_template(dev, 0x68, 0x18);
-       wl->beacon0_uploaded = 1;
+       wl->beacon0_uploaded = true;
 }
 
 static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
        if (wl->beacon1_uploaded)
                return;
        b43_write_beacon_template(dev, 0x468, 0x1A);
-       wl->beacon1_uploaded = 1;
+       wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
        if (unlikely(wl->beacon_templates_virgin)) {
                /* We never uploaded a beacon before.
                 * Upload both templates now, but only mark one valid. */
-               wl->beacon_templates_virgin = 0;
+               wl->beacon_templates_virgin = false;
                b43_upload_beacon0(dev);
                b43_upload_beacon1(dev);
                cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl)
        if (wl->current_beacon)
                dev_kfree_skb_any(wl->current_beacon);
        wl->current_beacon = beacon;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
        ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
                        b43err(dev->wl, "This device does not support DMA "
                               "on your system. It will now be switched to PIO.\n");
                        /* Fall back to PIO transfers if we get fatal DMA errors! */
-                       dev->use_pio = 1;
+                       dev->use_pio = true;
                        b43_controller_restart(dev, "DMA error");
                        return;
                }
@@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
                filename = NULL;
        else
                goto err_no_pcm;
-       fw->pcm_request_failed = 0;
+       fw->pcm_request_failed = false;
        err = b43_do_request_fw(ctx, filename, &fw->pcm);
        if (err == -ENOENT) {
                /* We did not find a PCM file? Not fatal, but
                 * core rev <= 10 must do without hwcrypto then. */
-               fw->pcm_request_failed = 1;
+               fw->pcm_request_failed = true;
        } else if (err)
                goto err_load;
 
@@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
        dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
        dev->qos_enabled = !!modparam_qos;
        /* Default to firmware/hardware crypto acceleration. */
-       dev->hwcrypto_enabled = 1;
+       dev->hwcrypto_enabled = true;
 
        if (dev->fw.opensource) {
                u16 fwcapa;
@@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
                if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
                        b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
                        /* Disable hardware crypto and fall back to software crypto. */
-                       dev->hwcrypto_enabled = 0;
+                       dev->hwcrypto_enabled = false;
                }
                if (!(fwcapa & B43_FWCAPA_QOS)) {
                        b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
                         * ieee80211_unregister to make sure the networking core can
                         * properly free possible resources. */
                        dev->wl->hw->queues = 1;
-                       dev->qos_enabled = 0;
+                       dev->qos_enabled = false;
                }
        } else {
                b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl)
        wl->rng.name = wl->rng_name;
        wl->rng.data_read = b43_rng_read;
        wl->rng.priv = (unsigned long)wl;
-       wl->rng_initialized = 1;
+       wl->rng_initialized = true;
        err = hwrng_register(&wl->rng);
        if (err) {
-               wl->rng_initialized = 0;
+               wl->rng_initialized = false;
                b43err(wl, "Failed to register the random "
                       "number generator (%d)\n", err);
        }
@@ -3718,13 +3718,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
                case IEEE80211_BAND_5GHZ:
                        if (d->phy.supports_5ghz) {
                                up_dev = d;
-                               gmode = 0;
+                               gmode = false;
                        }
                        break;
                case IEEE80211_BAND_2GHZ:
                        if (d->phy.supports_2ghz) {
                                up_dev = d;
-                               gmode = 1;
+                               gmode = true;
                        }
                        break;
                default:
@@ -4444,18 +4444,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
        atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
 
 #if B43_DEBUG
-       phy->phy_locked = 0;
-       phy->radio_locked = 0;
+       phy->phy_locked = false;
+       phy->radio_locked = false;
 #endif
 }
 
 static void setup_struct_wldev_for_init(struct b43_wldev *dev)
 {
-       dev->dfq_valid = 0;
+       dev->dfq_valid = false;
 
        /* Assume the radio is enabled. If it's not enabled, the state will
         * immediately get fixed on the first periodic work run. */
-       dev->radio_hw_enable = 1;
+       dev->radio_hw_enable = true;
 
        /* Stats */
        memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4689,16 +4689,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
 
        if (b43_bus_host_is_pcmcia(dev->dev) ||
            b43_bus_host_is_sdio(dev->dev)) {
-               dev->__using_pio_transfers = 1;
+               dev->__using_pio_transfers = true;
                err = b43_pio_init(dev);
        } else if (dev->use_pio) {
                b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
                        "This should not be needed and will result in lower "
                        "performance.\n");
-               dev->__using_pio_transfers = 1;
+               dev->__using_pio_transfers = true;
                err = b43_pio_init(dev);
        } else {
-               dev->__using_pio_transfers = 0;
+               dev->__using_pio_transfers = false;
                err = b43_dma_init(dev);
        }
        if (err)
@@ -4752,7 +4752,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
        b43dbg(wl, "Adding Interface type %d\n", vif->type);
 
        dev = wl->current_dev;
-       wl->operating = 1;
+       wl->operating = true;
        wl->vif = vif;
        wl->if_type = vif->type;
        memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4786,7 +4786,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
        B43_WARN_ON(wl->vif != vif);
        wl->vif = NULL;
 
-       wl->operating = 0;
+       wl->operating = false;
 
        b43_adjust_opmode(dev);
        memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4808,12 +4808,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
        memset(wl->bssid, 0, ETH_ALEN);
        memset(wl->mac_addr, 0, ETH_ALEN);
        wl->filter_flags = 0;
-       wl->radiotap_enabled = 0;
+       wl->radiotap_enabled = false;
        b43_qos_clear(wl);
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
-       wl->beacon_templates_virgin = 1;
-       wl->radio_enabled = 1;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
+       wl->beacon_templates_virgin = true;
+       wl->radio_enabled = true;
 
        mutex_lock(&wl->mutex);
 
@@ -4859,7 +4859,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
                        goto out_unlock;
        }
        b43_wireless_core_exit(dev);
-       wl->radio_enabled = 0;
+       wl->radio_enabled = false;
 
 out_unlock:
        mutex_unlock(&wl->mutex);
@@ -5047,7 +5047,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        struct pci_dev *pdev = NULL;
        int err;
        u32 tmp;
-       bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+       bool have_2ghz_phy = false, have_5ghz_phy = false;
 
        /* Do NOT do any device initialization here.
         * Do it in wireless_core_init() instead.
@@ -5090,7 +5090,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        }
 
        dev->phy.gmode = have_2ghz_phy;
-       dev->phy.radio_on = 1;
+       dev->phy.radio_on = true;
        b43_wireless_core_reset(dev, dev->phy.gmode);
 
        err = b43_phy_versioning(dev);
@@ -5101,11 +5101,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
            (pdev->device != 0x4312 &&
             pdev->device != 0x4319 && pdev->device != 0x4324)) {
                /* No multiband support. */
-               have_2ghz_phy = 0;
-               have_5ghz_phy = 0;
+               have_2ghz_phy = false;
+               have_5ghz_phy = false;
                switch (dev->phy.type) {
                case B43_PHYTYPE_A:
-                       have_5ghz_phy = 1;
+                       have_5ghz_phy = true;
                        break;
                case B43_PHYTYPE_LP: //FIXME not always!
 #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5115,7 +5115,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
                case B43_PHYTYPE_N:
                case B43_PHYTYPE_HT:
                case B43_PHYTYPE_LCN:
-                       have_2ghz_phy = 1;
+                       have_2ghz_phy = true;
                        break;
                default:
                        B43_WARN_ON(1);
@@ -5131,8 +5131,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
                /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
                if (dev->phy.type != B43_PHYTYPE_N &&
                    dev->phy.type != B43_PHYTYPE_LP) {
-                       have_2ghz_phy = 1;
-                       have_5ghz_phy = 0;
+                       have_2ghz_phy = true;
+                       have_5ghz_phy = false;
                }
        }
 
index 3ea44bb036844ef4a5da47b91ffd80ec8a18ec31..3f8883b14d9cc98ca334890541320aa50836d36e 100644 (file)
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
 
 #if B43_DEBUG
        B43_WARN_ON(dev->phy.radio_locked);
-       dev->phy.radio_locked = 1;
+       dev->phy.radio_locked = true;
 #endif
 
        macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
 
 #if B43_DEBUG
        B43_WARN_ON(!dev->phy.radio_locked);
-       dev->phy.radio_locked = 0;
+       dev->phy.radio_locked = false;
 #endif
 
        /* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
 {
 #if B43_DEBUG
        B43_WARN_ON(dev->phy.phy_locked);
-       dev->phy.phy_locked = 1;
+       dev->phy.phy_locked = true;
 #endif
        B43_WARN_ON(dev->dev->core_rev < 3);
 
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
 {
 #if B43_DEBUG
        B43_WARN_ON(!dev->phy.phy_locked);
-       dev->phy.phy_locked = 0;
+       dev->phy.phy_locked = false;
 #endif
        B43_WARN_ON(dev->dev->core_rev < 3);
 
index 8e157bc213f3016935b86387a47a7823dd7cdaa6..12f467b8d564f1c5cdca06acbdf65ca8f80b48c8 100644 (file)
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
                if (b43_phy_read(dev, 0x0033) & 0x0800)
                        break;
 
-               gphy->aci_enable = 1;
+               gphy->aci_enable = true;
 
                phy_stacksave(B43_PHY_RADIO_BITFIELD);
                phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
                if (!(b43_phy_read(dev, 0x0033) & 0x0800))
                        break;
 
-               gphy->aci_enable = 0;
+               gphy->aci_enable = false;
 
                phy_stackrestore(B43_PHY_RADIO_BITFIELD);
                phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
                        bbatt.att = 11;
                        if (phy->radio_rev == 8) {
                                rfatt.att = 15;
-                               rfatt.with_padmix = 1;
+                               rfatt.with_padmix = true;
                        } else {
                                rfatt.att = 9;
-                               rfatt.with_padmix = 0;
+                               rfatt.with_padmix = false;
                        }
                        b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
                }
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
        struct b43_bus_dev *bdev = dev->dev;
        struct b43_phy *phy = &dev->phy;
 
-       rf->with_padmix = 0;
+       rf->with_padmix = false;
 
        if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
            dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
                        return;
                case 8:
                        rf->att = 0xA;
-                       rf->with_padmix = 1;
+                       rf->with_padmix = true;
                        return;
                case 9:
                default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
        B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
                    (phy->radio_ver != 0x2050)); /* Not supported anymore */
 
-       gphy->dyn_tssi_tbl = 0;
+       gphy->dyn_tssi_tbl = false;
 
        if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
            pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
                                                               pab1, pab2);
                if (!gphy->tssi2dbm)
                        return -ENOMEM;
-               gphy->dyn_tssi_tbl = 1;
+               gphy->dyn_tssi_tbl = true;
        } else {
                /* pabX values not set in SPROM. */
                gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
 
        if (gphy->dyn_tssi_tbl)
                kfree(gphy->tssi2dbm);
-       gphy->dyn_tssi_tbl = 0;
+       gphy->dyn_tssi_tbl = false;
        gphy->tssi2dbm = NULL;
 
        kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
        if (phy->rev == 1) {
                /* Workaround: Temporarly disable gmode through the early init
                 * phase, as the gmode stuff is not needed for phy rev 1 */
-               phy->gmode = 0;
+               phy->gmode = false;
                b43_wireless_core_reset(dev, 0);
                b43_phy_initg(dev);
-               phy->gmode = 1;
+               phy->gmode = true;
                b43_wireless_core_reset(dev, 1);
        }
 
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
                                      gphy->radio_off_context.rfover);
                        b43_phy_write(dev, B43_PHY_RFOVERVAL,
                                      gphy->radio_off_context.rfoverval);
-                       gphy->radio_off_context.valid = 0;
+                       gphy->radio_off_context.valid = false;
                }
                channel = phy->channel;
                b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
                rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
                gphy->radio_off_context.rfover = rfover;
                gphy->radio_off_context.rfoverval = rfoverval;
-               gphy->radio_off_context.valid = 1;
+               gphy->radio_off_context.valid = true;
                b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
                b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
        }
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
        if ((phy->rev == 0) || (!phy->gmode))
                return -ENODEV;
 
-       gphy->aci_wlan_automatic = 0;
+       gphy->aci_wlan_automatic = false;
        switch (mode) {
        case B43_INTERFMODE_AUTOWLAN:
-               gphy->aci_wlan_automatic = 1;
+               gphy->aci_wlan_automatic = true;
                if (gphy->aci_enable)
                        mode = B43_INTERFMODE_MANUALWLAN;
                else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
                b43_radio_interference_mitigation_disable(dev, currentmode);
 
        if (mode == B43_INTERFMODE_NONE) {
-               gphy->aci_enable = 0;
-               gphy->aci_hw_rssi = 0;
+               gphy->aci_enable = false;
+               gphy->aci_hw_rssi = false;
        } else
                b43_radio_interference_mitigation_enable(dev, mode);
        gphy->interfmode = mode;
index f93d66b1817b156727d91cb5210d225293148b6a..3ae28561f7a4078f88f9ddc636e2b89c5df42c6a 100644 (file)
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
        struct b43_phy_lp *lpphy = dev->phy.lp;
 
        if (user)
-               lpphy->crs_usr_disable = 1;
+               lpphy->crs_usr_disable = true;
        else
-               lpphy->crs_sys_disable = 1;
+               lpphy->crs_sys_disable = true;
        b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
 }
 
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
        struct b43_phy_lp *lpphy = dev->phy.lp;
 
        if (user)
-               lpphy->crs_usr_disable = 0;
+               lpphy->crs_usr_disable = false;
        else
-               lpphy->crs_sys_disable = 0;
+               lpphy->crs_sys_disable = false;
 
        if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
                if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
index aadfed056204073429be3b47e651832d9449fd68..bf5a43855358319991f94e9e0c43f341dcb161bb 100644 (file)
@@ -3416,7 +3416,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
        if (dev->phy.rev >= 4) {
                avoid = nphy->hang_avoid;
-               nphy->hang_avoid = 0;
+               nphy->hang_avoid = false;
        }
 
        b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3526,7 +3526,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
                        if (phy6or5x && updated[core] == 0) {
                                b43_nphy_update_tx_cal_ladder(dev, core);
-                               updated[core] = 1;
+                               updated[core] = true;
                        }
 
                        tmp = (params[core].ncorr[type] << 8) | 0x66;
index fcff923b3c18b25d44ae4e266747825b8b571ebf..3533ab86bd363982e4ffd5f73e7e7af1dacc70d6 100644 (file)
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
                /* Not enough memory on the queue. */
                err = -EBUSY;
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-               q->stopped = 1;
+               q->stopped = true;
                goto out;
        }
 
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
            (q->free_packet_slots == 0)) {
                /* The queue is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-               q->stopped = 1;
+               q->stopped = true;
        }
 
 out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
 
        if (q->stopped) {
                ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
-               q->stopped = 0;
+               q->stopped = false;
        }
 }
 
@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
        const char *err_msg = NULL;
        struct b43_rxhdr_fw4 *rxhdr =
                (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+       size_t rxhdr_size = sizeof(*rxhdr);
 
        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
-       memset(rxhdr, 0, sizeof(*rxhdr));
+       switch (dev->fw.hdr_format) {
+       case B43_FW_HDR_410:
+       case B43_FW_HDR_351:
+               rxhdr_size -= sizeof(rxhdr->format_598) -
+                       sizeof(rxhdr->format_351);
+               break;
+       case B43_FW_HDR_598:
+               break;
+       }
+       memset(rxhdr, 0, rxhdr_size);
 
        /* Check if we have data and wait for it to get ready. */
        if (q->rev >= 8) {
@@ -657,11 +667,11 @@ data_ready:
 
        /* Get the preamble (RX header) */
        if (q->rev >= 8) {
-               b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+               b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
        } else {
-               b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+               b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
        }
index 5f77cbe0b6aaeb5d6e121ad5e37b70a3b8596d62..2c5367884b3ff0f0a600ba5c6b409b43fabcc349 100644 (file)
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
                              struct ieee80211_tx_info *report,
                              const struct b43_txstatus *status)
 {
-       bool frame_success = 1;
+       bool frame_success = true;
        int retry_limit;
 
        /* preserve the confiured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
                /* The frame was not ACKed... */
                if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
                        /* ...but we expected an ACK. */
-                       frame_success = 0;
+                       frame_success = false;
                }
        }
        if (status->frame_count == 0) {
index aebef75a2c6289d71df2fd26d748cd3207847e13..f1f8bd09bd87fd378cb405d6aa56a38eafe4fb3c 100644 (file)
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
        ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
        if (for_tx) {
-               ring->tx = 1;
+               ring->tx = true;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
@@ -805,7 +805,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
 static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
 {
        u64 orig_mask = mask;
-       bool fallback = 0;
+       bool fallback = false;
        int err;
 
        /* Try to set the DMA mask. If it fails, try falling back to a
@@ -819,12 +819,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
                }
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
-                       fallback = 1;
+                       fallback = true;
                        continue;
                }
                b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -857,7 +857,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
 #ifdef CONFIG_B43LEGACY_PIO
                b43legacywarn(dev->wl, "DMA for this device not supported. "
                        "Falling back to PIO\n");
-               dev->__using_pio = 1;
+               dev->__using_pio = true;
                return -EAGAIN;
 #else
                b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1067,7 +1067,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        memset(meta, 0, sizeof(*meta));
 
        meta->skb = skb;
-       meta->is_last_fragment = 1;
+       meta->is_last_fragment = true;
 
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
@@ -1183,7 +1183,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
                unsigned int skb_mapping = skb_get_queue_mapping(skb);
                ieee80211_stop_queue(dev->wl->hw, skb_mapping);
                dev->wl->tx_queue_stopped[skb_mapping] = 1;
-               ring->stopped = 1;
+               ring->stopped = true;
                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                        b43legacydbg(dev->wl, "Stopped TX ring %d\n",
                               ring->index);
@@ -1293,7 +1293,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
        dev->stats.last_tx = jiffies;
        if (ring->stopped) {
                B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
-               ring->stopped = 0;
+               ring->stopped = false;
        }
 
        if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
index 2f1bfdc44f94ff2658f90561b21814bbdce72ffd..fd4565389c77df59056d239e2bc778164a622496 100644 (file)
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
                if (sprom[i] == 0xFF) {
                        /* There is no LED information in the SPROM
                         * for this LED. Hardcode it here. */
-                       activelow = 0;
+                       activelow = false;
                        switch (i) {
                        case 0:
                                behaviour = B43legacy_LED_ACTIVITY;
-                               activelow = 1;
+                               activelow = true;
                                if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
                                        behaviour = B43legacy_LED_RADIO_ALL;
                                break;
index d9185633e82f4a074578e15fe4ad5499ab217868..75e70bce40f6b8eb896ebb348e24c0f32a4b1930 100644 (file)
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
        macctl &= ~B43legacy_MACCTL_GMODE;
        if (flags & B43legacy_TMSLOW_GMODE) {
                macctl |= B43legacy_MACCTL_GMODE;
-               dev->phy.gmode = 1;
+               dev->phy.gmode = true;
        } else
-               dev->phy.gmode = 0;
+               dev->phy.gmode = false;
        macctl |= B43legacy_MACCTL_IHR_ENABLED;
        b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
 }
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
        if (dev->noisecalc.calculation_running)
                return;
        dev->noisecalc.channel_at_start = dev->phy.channel;
-       dev->noisecalc.calculation_running = 1;
+       dev->noisecalc.calculation_running = true;
        dev->noisecalc.nr_samples = 0;
 
        b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
 
                dev->stats.link_noise = average;
 drop_calculation:
-               dev->noisecalc.calculation_running = 0;
+               dev->noisecalc.calculation_running = false;
                return;
        }
 generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
                        b43legacy_power_saving_ctl_bits(dev, -1, -1);
        }
        if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-               dev->dfq_valid = 1;
+               dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
                b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
                                  b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
                                  | B43legacy_MACCMD_DFQ_VALID);
-               dev->dfq_valid = 0;
+               dev->dfq_valid = false;
        }
 }
 
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
        unsigned int i, len, variable_len;
        const struct ieee80211_mgmt *bcn;
        const u8 *ie;
-       bool tim_found = 0;
+       bool tim_found = false;
        unsigned int rate;
        u16 ctl;
        int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
                        /* A valid TIM is at least 4 bytes long. */
                        if (ie_len < 4)
                                break;
-                       tim_found = 1;
+                       tim_found = true;
 
                        tim_position = sizeof(struct b43legacy_plcp_hdr6);
                        tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
         *        but we don't use that feature anyway. */
        b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
                                      &__b43legacy_ratetable[3]);
-       wl->beacon0_uploaded = 1;
+       wl->beacon0_uploaded = true;
 }
 
 static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
        if (wl->beacon1_uploaded)
                return;
        b43legacy_write_beacon_template(dev, 0x468, 0x1A);
-       wl->beacon1_uploaded = 1;
+       wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
        if (unlikely(wl->beacon_templates_virgin)) {
                /* We never uploaded a beacon before.
                 * Upload both templates now, but only mark one valid. */
-               wl->beacon_templates_virgin = 0;
+               wl->beacon_templates_virgin = false;
                b43legacy_upload_beacon0(dev);
                b43legacy_upload_beacon1(dev);
                cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
        if (wl->current_beacon)
                dev_kfree_skb_any(wl->current_beacon);
        wl->current_beacon = beacon;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
        ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -2544,7 +2544,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
                if (d->phy.possible_phymodes & phymode) {
                        /* Ok, this device supports the PHY-mode.
                         * Set the gmode bit. */
-                       *gmode = 1;
+                       *gmode = true;
                        *dev = d;
 
                        return 0;
@@ -2580,7 +2580,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
        struct b43legacy_wldev *uninitialized_var(up_dev);
        struct b43legacy_wldev *down_dev;
        int err;
-       bool gmode = 0;
+       bool gmode = false;
        int prev_status;
 
        err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -3084,12 +3084,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
 
        /* Assume the radio is enabled. If it's not enabled, the state will
         * immediately get fixed on the first periodic work run. */
-       dev->radio_hw_enable = 1;
+       dev->radio_hw_enable = true;
 
        phy->savedpctlreg = 0xFFFF;
-       phy->aci_enable = 0;
-       phy->aci_wlan_automatic = 0;
-       phy->aci_hw_rssi = 0;
+       phy->aci_enable = false;
+       phy->aci_wlan_automatic = false;
+       phy->aci_hw_rssi = false;
 
        lo = phy->_lo_pairs;
        if (lo)
@@ -3121,7 +3121,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
 static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
 {
        /* Flags */
-       dev->dfq_valid = 0;
+       dev->dfq_valid = false;
 
        /* Stats */
        memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3227,9 +3227,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
        phy->lofcal = 0xFFFF;
        phy->initval = 0xFFFF;
 
-       phy->aci_enable = 0;
-       phy->aci_wlan_automatic = 0;
-       phy->aci_hw_rssi = 0;
+       phy->aci_enable = false;
+       phy->aci_wlan_automatic = false;
+       phy->aci_hw_rssi = false;
 
        phy->antenna_diversity = 0xFFFF;
        memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3395,7 +3395,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
        b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
 
        dev = wl->current_dev;
-       wl->operating = 1;
+       wl->operating = true;
        wl->vif = vif;
        wl->if_type = vif->type;
        memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3429,7 +3429,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
        B43legacy_WARN_ON(wl->vif != vif);
        wl->vif = NULL;
 
-       wl->operating = 0;
+       wl->operating = false;
 
        spin_lock_irqsave(&wl->irq_lock, flags);
        b43legacy_adjust_opmode(dev);
@@ -3453,10 +3453,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
        memset(wl->bssid, 0, ETH_ALEN);
        memset(wl->mac_addr, 0, ETH_ALEN);
        wl->filter_flags = 0;
-       wl->beacon0_uploaded = 0;
-       wl->beacon1_uploaded = 0;
-       wl->beacon_templates_virgin = 1;
-       wl->radio_enabled = 1;
+       wl->beacon0_uploaded = false;
+       wl->beacon1_uploaded = false;
+       wl->beacon_templates_virgin = true;
+       wl->radio_enabled = true;
 
        mutex_lock(&wl->mutex);
 
@@ -3495,7 +3495,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
        if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
                b43legacy_wireless_core_stop(dev);
        b43legacy_wireless_core_exit(dev);
-       wl->radio_enabled = 0;
+       wl->radio_enabled = false;
        mutex_unlock(&wl->mutex);
 }
 
@@ -3654,7 +3654,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
                have_bphy = 1;
 
        dev->phy.gmode = (have_gphy || have_bphy);
-       dev->phy.radio_on = 1;
+       dev->phy.radio_on = true;
        tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
        b43legacy_wireless_core_reset(dev, tmp);
 
@@ -3745,7 +3745,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
                     (void (*)(unsigned long))b43legacy_interrupt_tasklet,
                     (unsigned long)wldev);
        if (modparam_pio)
-               wldev->__using_pio = 1;
+               wldev->__using_pio = true;
        INIT_LIST_HEAD(&wldev->list);
 
        err = b43legacy_wireless_core_attach(wldev);
index 475eb14e665bb324ade483f67336d89a04a1a113..fcbafcd603ccf1549c112012845dcd4a73a1b10c 100644 (file)
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
                if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
                        break;
 
-               phy->aci_enable = 1;
+               phy->aci_enable = true;
 
                phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
                phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
                if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
                        break;
 
-               phy->aci_enable = 0;
+               phy->aci_enable = false;
 
                phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
                phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
            (phy->rev == 0) || (!phy->gmode))
                return -ENODEV;
 
-       phy->aci_wlan_automatic = 0;
+       phy->aci_wlan_automatic = false;
        switch (mode) {
        case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
-               phy->aci_wlan_automatic = 1;
+               phy->aci_wlan_automatic = true;
                if (phy->aci_enable)
                        mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
                else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
                                                                currentmode);
 
        if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
-               phy->aci_enable = 0;
-               phy->aci_hw_rssi = 0;
+               phy->aci_enable = false;
+               phy->aci_hw_rssi = false;
        } else
                b43legacy_radio_interference_mitigation_enable(dev, mode);
        phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
                                            phy->radio_off_context.rfover);
                        b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
                                            phy->radio_off_context.rfoverval);
-                       phy->radio_off_context.valid = 0;
+                       phy->radio_off_context.valid = false;
                }
                channel = phy->channel;
                err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
        default:
                B43legacy_BUG_ON(1);
        }
-       phy->radio_on = 1;
+       phy->radio_on = true;
 }
 
 void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
                if (!force) {
                        phy->radio_off_context.rfover = rfover;
                        phy->radio_off_context.rfoverval = rfoverval;
-                       phy->radio_off_context.valid = 1;
+                       phy->radio_off_context.valid = true;
                }
                b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
                b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
                                    rfoverval & 0xFF73);
        } else
                b43legacy_phy_write(dev, 0x0015, 0xAA00);
-       phy->radio_on = 0;
+       phy->radio_on = false;
        b43legacydbg(dev->wl, "Radio initialized\n");
 }
 
index 72bee2c049574d2a914baa9ffd6ea0c365ea6949..eb9eb766ac270d5e3011b98568343eeba12c5833 100644 (file)
@@ -784,7 +784,7 @@ static int brcmf_netdev_stop(struct net_device *ndev)
                return 0;
 
        /* Set state and stop OS transmissions */
-       drvr->bus_if->drvr_up = 0;
+       drvr->bus_if->drvr_up = false;
        netif_stop_queue(ndev);
 
        return 0;
@@ -821,7 +821,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
        }
        /* Allow transmit calls */
        netif_start_queue(ndev);
-       drvr->bus_if->drvr_up = 1;
+       drvr->bus_if->drvr_up = true;
        if (brcmf_cfg80211_up(drvr->config)) {
                brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
                return -1;
index b4cf617276c933cbd4217b1bb9d6dbcaab815676..2e90a9a16ed6885e4792c56953efb701c8b8ee8e 100644 (file)
@@ -641,10 +641,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
        /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
        if ((core->id.id == SDIOD_CORE_ID)
            && ((rev > 0) && (rev <= 2)))
-               di->addrext = 0;
+               di->addrext = false;
        else if ((core->id.id == I2S_CORE_ID) &&
                 ((rev == 0) || (rev == 1)))
-               di->addrext = 0;
+               di->addrext = false;
        else
                di->addrext = _dma_isaddrext(di);
 
index 77fdc45b43efe93dcec3dbc9ae3945e6f2b4b84f..d106576ce338980b4645ced6084139084f74fc21 100644 (file)
@@ -1265,7 +1265,7 @@ uint brcms_reset(struct brcms_info *wl)
        brcms_c_reset(wl->wlc);
 
        /* dpc will not be rescheduled */
-       wl->resched = 0;
+       wl->resched = false;
 
        return 0;
 }
index efa0142bdad57d37db5a3d32a8d10c212fac1ddd..ce8562aa5db0bee06c59244ef9c3099b6cf58c81 100644 (file)
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
                si_pmu_pllupd(pi->sh->sih);
                write_phy_reg(pi, 0x942, 0);
                wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
-               pi_lcn->lcnphy_spurmod = 0;
+               pi_lcn->lcnphy_spurmod = false;
                mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
 
                write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
                write_phy_reg(pi, 0x942, 0);
                wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
 
-               pi_lcn->lcnphy_spurmod = 0;
+               pi_lcn->lcnphy_spurmod = false;
                mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
 
                write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
 {
        s8 index, delta_brd, delta_temp, new_index, tempcorrx;
        s16 manp, meas_temp, temp_diff;
-       bool neg = 0;
+       bool neg = false;
        u16 temp;
        struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
        manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
        temp_diff = manp - meas_temp;
        if (temp_diff < 0) {
-               neg = 1;
+               neg = true;
                temp_diff = -temp_diff;
        }
 
@@ -3682,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
        wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
        udelay(20);
        for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
-               phy_c23 = 1;
-               phy_c22 = 0;
+               phy_c23 = true;
+               phy_c22 = false;
                switch (cal_type) {
                case 0:
                        phy_c10 = 511;
@@ -3701,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
 
                phy_c9 = read_phy_reg(pi, 0x93d);
                phy_c9 = 2 * phy_c9;
-               phy_c24 = 0;
+               phy_c24 = false;
                phy_c5 = 7;
-               phy_c25 = 1;
+               phy_c25 = true;
                while (1) {
                        write_radio_reg(pi, RADIO_2064_REG026,
                                        (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
                        udelay(50);
-                       phy_c22 = 0;
+                       phy_c22 = false;
                        ptr[130] = 0;
                        wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
                        if (ptr[130] == 1)
-                               phy_c22 = 1;
+                               phy_c22 = true;
                        if (phy_c22)
                                phy_c5 -= 1;
                        if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3722,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
                        if (phy_c5 <= 0 || phy_c5 >= 7)
                                break;
                        phy_c24 = phy_c22;
-                       phy_c25 = 0;
+                       phy_c25 = false;
                }
 
                if (phy_c5 < 0)
@@ -3773,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
                                        phy_c13 = phy_c11;
                                        phy_c14 = phy_c12;
                                }
-                               phy_c23 = 0;
+                               phy_c23 = false;
                        }
                }
-               phy_c23 = 1;
+               phy_c23 = true;
                phy_c15 = phy_c13;
                phy_c16 = phy_c14;
                phy_c7 = phy_c7 >> 1;
@@ -3966,7 +3966,7 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
 {
        u16 tempsenseval1, tempsenseval2;
        s16 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
 
        if (mode == 1) {
                suspend = (0 == (bcma_read32(pi->d11core,
@@ -4008,7 +4008,7 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
 {
        u16 tempsenseval1, tempsenseval2;
        s32 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
        u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
        struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
@@ -4076,7 +4076,7 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
 {
        u16 vbatsenseval;
        s32 avg = 0;
-       bool suspend = 0;
+       bool suspend = false;
 
        if (mode == 1) {
                suspend = (0 == (bcma_read32(pi->d11core,
index f7724d7f92871508278c75d150f11d6e64be12c7..67d6e324e26f7b2e75910921aceadc034a5a10ed 100644 (file)
@@ -1186,9 +1186,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
index 98a179f98ea14c6bf7ddde509079a977d64257bb..1f868b166d10ca4a730056316f803c28ec7034a3 100644 (file)
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
        .mac_addr               = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
 };
 
-static int modparam_reset;
+static bool modparam_reset;
 module_param_named(reset, modparam_reset, bool, 0644);
 MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
 
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
 module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
 MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
 
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
                iwm_invalidate_mlme_profile(iwm);
 
        clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
-       iwm->umac_profile_active = 0;
+       iwm->umac_profile_active = false;
        memset(iwm->bssid, 0, ETH_ALEN);
        iwm->channel = 0;
 
index a414768f40f11e7e002beae44d292972547cfbde..7d708f4395f38fa6fc21f408d99b68570d6dc580 100644 (file)
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
        clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
        clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
 
-       iwm->umac_profile_active = 0;
+       iwm->umac_profile_active = false;
        memset(iwm->bssid, 0, ETH_ALEN);
        iwm->channel = 0;
 
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
                             umac_sta->mac_addr,
                             umac_sta->flags & UMAC_STA_FLAG_QOS);
 
-               sta->valid = 1;
+               sta->valid = true;
                sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
                sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
                memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
                sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
 
                if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
-                       sta->valid = 0;
+                       sta->valid = false;
 
                break;
        case UMAC_OPCODE_CLEAR_ALL:
                for (i = 0; i < IWM_STA_TABLE_NUM; i++)
-                       iwm->sta_table[i].valid = 0;
+                       iwm->sta_table[i].valid = false;
 
                break;
        default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
 
        switch (hdr->oid) {
        case UMAC_WIFI_IF_CMD_SET_PROFILE:
-               iwm->umac_profile_active = 1;
+               iwm->umac_profile_active = true;
                break;
        default:
                break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
         */
        list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
                if (cmd->seq_num == seq_num) {
-                       cmd->resp_received = 1;
+                       cmd->resp_received = true;
                        cmd->buf.len = buf_size;
                        memcpy(cmd->buf.hdr, buf, buf_size);
                        wake_up_interruptible(&iwm->nonwifi_queue);
index e269351798611b501f05a459fc28f5e59b563cc2..3f7bf4d912b614e828abcd1a94e95aff6e0ee9b3 100644 (file)
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
         * Most of the libertas cards can do unaligned register access, but some
         * weird ones cannot. That's especially true for the CF8305 card.
         */
-       card->align_regs = 0;
+       card->align_regs = false;
 
        card->model = get_model(p_dev->manf_id, p_dev->card_id);
        if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
        /* Check if we have a current silicon */
        prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
        if (card->model == MODEL_8305) {
-               card->align_regs = 1;
+               card->align_regs = true;
                if (prod_id < IF_CS_CF8305_B1_REV) {
                        pr_err("8305 rev B0 and older are not supported\n");
                        ret = -ENODEV;
index ceb51b6e67026bb434c1db67259ffd27129917ff..a03457292c88c1fd5f96eb4ad5a60df671227d91 100644 (file)
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
                return;
 
        if (skb_queue_empty(&priv->bc_ps_buf)) {
-               bool tx_buff_bc = 0;
+               bool tx_buff_bc = false;
 
                while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
                        skb_queue_tail(&priv->bc_ps_buf, skb);
-                       tx_buff_bc = 1;
+                       tx_buff_bc = true;
                }
                if (tx_buff_bc) {
                        ieee80211_stop_queues(priv->hw);
index 52bcdf40d5bd64696091179518233d187d3971fb..4b9e730d2c8a13cc184f1ee5b5cba868f59398a7 100644 (file)
@@ -708,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
        wiphy_debug(hw->wiphy, "%s\n", __func__);
-       data->started = 1;
+       data->started = true;
        return 0;
 }
 
@@ -716,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
-       data->started = 0;
+       data->started = false;
        del_timer(&data->beacon_timer);
        wiphy_debug(hw->wiphy, "%s\n", __func__);
 }
index e40196dfdea06dba7a4185440cd71ea8830c2a92..470ca75ec250ae522184a1da80170d247ea6733f 100644 (file)
@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
 {
        bool cancel_flag = false;
        int status = adapter->cmd_wait_q.status;
-       struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+       struct cmd_ctrl_node *cmd_queued;
 
+       if (!adapter->cmd_queued)
+               return 0;
+
+       cmd_queued = adapter->cmd_queued;
        adapter->cmd_queued = NULL;
+
        dev_dbg(adapter->dev, "cmd pending\n");
        atomic_inc(&adapter->cmd_pending);
 
index 8ea701261c619d44432303346c66c77d818b7e8b..7becea3dec654de21ab5090fc55b8a5068b7703f 100644 (file)
@@ -31,7 +31,7 @@
 #define MWL8K_VERSION  "0.13"
 
 /* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
 module_param(ap_mode_default, bool, 0);
 MODULE_PARM_DESC(ap_mode_default,
                 "Set to 1 to make ap mode the default instead of sta mode");
@@ -743,10 +743,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
 
                ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
                if (ready_code == MWL8K_FWAP_READY) {
-                       priv->ap_fw = 1;
+                       priv->ap_fw = true;
                        break;
                } else if (ready_code == MWL8K_FWSTA_READY) {
-                       priv->ap_fw = 0;
+                       priv->ap_fw = false;
                        break;
                }
 
@@ -5634,8 +5634,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
        INIT_LIST_HEAD(&priv->vif_list);
 
        /* Set default radio state and preamble */
-       priv->radio_on = 0;
-       priv->radio_short_preamble = 0;
+       priv->radio_on = false;
+       priv->radio_short_preamble = false;
 
        /* Finalize join worker */
        INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
index b52acc4b408630967ac5a6469e6a9efe1f3d47dd..9fb77d0319f5de9ffde985de3691d0b4554cf152 100644 (file)
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
 MODULE_PARM_DESC(orinoco_debug, "Debug level");
 #endif
 
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
 module_param(suppress_linkstatus, bool, 0644);
 MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
 
index db4d9a02f264eb6877face19cd892ac9e7d363fa..af2ca1a9c7d32ed014f11d1390e5c1d8bcf09fb6 100644 (file)
@@ -27,7 +27,7 @@
 #include "p54.h"
 #include "lmac.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
index 0021e49485128b08d95344b47ee047b706c76cdd..04fec1fa6e0b6915b5c3e8e4938a668246df7586 100644 (file)
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
                            unsigned int pkt_addr, int rx_len)
 {
        UCHAR buff[256];
-       struct rx_msg *msg = (struct rx_msg *)buff;
+       struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 
        del_timer(&local->timer);
 
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
                              unsigned int pkt_addr, int rx_len)
 {
 /*  UCHAR buff[256];
-    struct rx_msg *msg = (struct rx_msg *)buff;
+    struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 */
        pr_debug("Deauthentication frame received\n");
        local->authentication_state = UNAUTHENTICATED;
index d7646f299bd3264c0d1cd24803178ca1b51cffba..3c3b98b152c30bb7a5b1f2cd3ce20793eb63c212 100644 (file)
@@ -566,9 +566,9 @@ struct phy_header {
     UCHAR hdr_3;
     UCHAR hdr_4;
 };
-struct rx_msg {
+struct ray_rx_msg {
     struct mac_header mac;
-    UCHAR  var[1];
+    UCHAR  var[0];
 };
 
 struct tx_msg {
index 53c5f878f61d56b134956d489ad43d49911d48aa..de7d41f21a69a3667e725e9366244b482c50c205 100644 (file)
@@ -39,7 +39,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index da48c8ac27bd53c3a3ef92d135c66c71d902573d..4941a1a2321907fb93cc8a2fe08a3ce87d8ecf16 100644 (file)
@@ -50,7 +50,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index 7f21005c1bb0c156e7429146a7a40fc3b88c6167..f8eb49f5ac29c7f0b32b51cefa1bb0c265ae8b8e 100644 (file)
@@ -45,7 +45,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index bf55b4a311e3708e95cb7872c30486e7e6ad5d65..e0c6d117429d85c85439b76f41c72ead820cf906 100644 (file)
@@ -41,7 +41,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index cfb19dbb0a67d73fc9ca9f71c09e9a429a452ba8..1c69c737086d307c7d46f3656bc2b6d8209b7ef1 100644 (file)
@@ -40,7 +40,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
index 7c1d82d8d71c01839c8f0ae5b2df07dc6d6876c1..8d6eb0f56c031b7b4b7ac2d43756c88cd9b0f28b 100644 (file)
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
        u8 valid = 0;
 
        /*set init state to on */
-       rtlpriv->rfkill.rfkill_state = 1;
+       rtlpriv->rfkill.rfkill_state = true;
        wiphy_rfkill_set_hw_state(hw->wiphy, 0);
 
        radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
index 0d4d242849b46dc72f5a2d59bcd412135c73f3ee..39e0907a3c4eac8d89a9f682dc53d8471cbec560 100644 (file)
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        u8 init_aspm;
 
        ppsc->reg_rfps_level = 0;
-       ppsc->support_aspm = 0;
+       ppsc->support_aspm = false;
 
        /*Update PCI ASPM setting */
        ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
                if (ieee80211_is_nullfunc(fc)) {
                        if (ieee80211_has_pm(fc)) {
                                rtlpriv->mac80211.offchan_delay = true;
-                               rtlpriv->psc.state_inap = 1;
+                               rtlpriv->psc.state_inap = true;
                        } else {
-                               rtlpriv->psc.state_inap = 0;
+                               rtlpriv->psc.state_inap = false;
                        }
                }
 
index f2aa33dc4d7806d89c80ac5ac3471e753c626173..89ef6982ce50ea836c03975c3b39850f0b41aa5e 100644 (file)
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
 
        rtl8192ce_bt_reg_init(hw);
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
 
index 4ed973a3aa177fd822531eeb3a4d04014a0ee504..124cf633861c3d6f061e44af9083f95a0a12b395 100644 (file)
@@ -2436,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
                         "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
        }
        if (actuallyset) {
-               ppsc->hwradiooff = 1;
+               ppsc->hwradiooff = true;
                if (e_rfpowerstate_toset == ERFON) {
                        if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM) &&
                             RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
index 94a3e17061582e7afacab5cb590590bd69fbb8f2..3527c7957b4529c5a0819b116c5236ba5ec7c13d 100644 (file)
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
        const struct firmware *firmware;
        int err;
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
index 149493f4c25c650ab2cf5ebcadc78d5d743997da..7911c9c870859ab11c5e03e3bb7cdede5eb0bb54 100644 (file)
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 
        rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
-       rtlpriv->dm.useramask = 1;
+       rtlpriv->dm.useramask = true;
 
        /* dual mac */
        if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
index 92f49d522c56dbc2b81f54b1850c00df6b92e9a2..78723cf59491264706b636be638d68eb5585818f 100644 (file)
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
        int err = 0;
        u16 earlyrxthreshold = 7;
 
-       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = true;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpriv->dm.useramask = true;
 
index 115969df0c9ef8d6f81f7bd96b35f9e11fe53ffb..cdaf1429fa0b30b6322f31d21f48f5216e51f7d3 100644 (file)
@@ -1488,7 +1488,7 @@ struct rtl_intf_ops {
 
 struct rtl_mod_params {
        /* default: 0 = using hardware encryption */
-       int sw_crypto;
+       bool sw_crypto;
 
        /* default: 0 = DBG_EMERG (0)*/
        int debug;
index 182562952c792a9e347b9a0cc62744ee4e7a8f81..0b5c18feb3038284316ba4dd76cc37d7a36cc727 100644 (file)
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
        return 0;
 }
 
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct xenvif *vif = netdev_priv(dev);
 
index 0cb594c86090fa7c25f3578132528f5f616aef26..639cf8ab62ba09b26af2d1796da1463bd4763cfc 100644 (file)
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        struct gnttab_copy *copy_gop;
        struct netbk_rx_meta *meta;
        /*
-        * These variables a used iff get_page_ext returns true,
+        * These variables are used iff get_page_ext returns true,
         * in which case they are guaranteed to be initialized.
         */
        unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                if (!page)
                        return NULL;
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txp->offset;
@@ -1021,7 +1019,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                pending_idx = *((u16 *)skb->data);
                xen_netbk_idx_release(netbk, pending_idx);
                for (j = start; j < i; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xen_netbk_idx_release(netbk, pending_idx);
                }
 
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        continue;
                }
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txreq.gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txreq.offset;
@@ -1668,7 +1664,7 @@ static int __init netback_init(void)
                                             "netback/%u", group);
 
                if (IS_ERR(netbk->task)) {
-                       printk(KERN_ALERT "kthread_run() fails at netback\n");
+                       printk(KERN_ALERT "kthread_create() fails at netback\n");
                        del_timer(&netbk->net_timer);
                        rc = PTR_ERR(netbk->task);
                        goto failed_init;
index 226faab236032b9e6a9f28d5895cf194bc93ac65..0a59c57864f510ba5bcaf54df933dfbaa76f49e2 100644 (file)
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
 #define xennet_sysfs_delif(dev) do { } while (0)
 #endif
 
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
 {
        return dev->features & NETIF_F_SG;
 }
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
        gnttab_free_grant_references(np->gref_rx_head);
 }
 
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct netfront_info *np = netdev_priv(dev);
        int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
                netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-       case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
+               break;
+
+       case XenbusStateConnected:
                netif_notify_peers(netdev);
                break;
 
index 6d3dd3988d0f2891e65514a2bac68b184409ac1d..0f0cfa3bca301e2d824ad64ebdaba23d6c617825 100644 (file)
 #include <linux/string.h>
 #include <linux/slab.h>
 
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
        struct of_irq oirq;
 
        if (of_irq_map_one(dev, index, &oirq))
-               return NO_IRQ;
+               return 0;
 
        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
@@ -60,27 +55,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
  */
 struct device_node *of_irq_find_parent(struct device_node *child)
 {
-       struct device_node *p, *c = child;
+       struct device_node *p;
        const __be32 *parp;
 
-       if (!of_node_get(c))
+       if (!of_node_get(child))
                return NULL;
 
        do {
-               parp = of_get_property(c, "interrupt-parent", NULL);
+               parp = of_get_property(child, "interrupt-parent", NULL);
                if (parp == NULL)
-                       p = of_get_parent(c);
+                       p = of_get_parent(child);
                else {
                        if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
                                p = of_node_get(of_irq_dflt_pic);
                        else
                                p = of_find_node_by_phandle(be32_to_cpup(parp));
                }
-               of_node_put(c);
-               c = p;
+               of_node_put(child);
+               child = p;
        } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
 
-       return (p == child) ? NULL : p;
+       return p;
 }
 
 /**
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
        /* Only dereference the resource if both the
         * resource and the irq are valid. */
-       if (r && irq != NO_IRQ) {
+       if (r && irq) {
                r->start = r->end = irq;
                r->flags = IORESOURCE_IRQ;
                r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
 {
        int nr = 0;
 
-       while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+       while (of_irq_to_resource(dev, nr, NULL))
                nr++;
 
        return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
        int i;
 
        for (i = 0; i < nr_irqs; i++, res++)
-               if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+               if (!of_irq_to_resource(dev, i, res))
                        break;
 
        return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
 
                desc->dev = np;
                desc->interrupt_parent = of_irq_find_parent(np);
+               if (desc->interrupt_parent == np)
+                       desc->interrupt_parent = NULL;
                list_add_tail(&desc->list, &intc_desc_list);
        }
 
index cbd5d701c7e086f632f74bf8bec29fe1a23e6a55..63b3ec48c203a43f3d8a9d395e459d765fd7f7b0 100644 (file)
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        if (!lookup)
                return NULL;
 
-       for(; lookup->name != NULL; lookup++) {
+       for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
                if (of_address_to_resource(np, 0, &res))
index dccd8636095cb361e2e0e1e2bb8fb7fdd57ecee2..f8c752e408a663d55adf7e84ca4fb42aa93d1d02 100644 (file)
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
        return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
        int err;
 
+       /* always init architecture to set up backtrace support */
        err = oprofile_arch_init(&oprofile_ops);
-       if (err < 0 || timer) {
-               printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+       timer_mode = err || timer;      /* fall back to timer mode on errors */
+       if (timer_mode) {
+               if (!err)
+                       oprofile_arch_exit();
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }
-       return oprofilefs_register();
+
+       err = oprofilefs_register();
+       if (!err)
+               return 0;
+
+       /* failed */
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
+
+       return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
-       oprofile_timer_exit();
        oprofilefs_unregister();
-       oprofile_arch_exit();
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
 }
 
 
index 89f63456646fa4c67b6ee74f7c657917be05a425..84a208dbed939afa20743804d8e4d75ca5ff741a 100644 (file)
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
+       retval = 0;
        if (val)
                retval = oprofile_start();
        else
index d0de6cc2d7a5e56e36d2dfa8b2cc8b6ad17074f5..2f0aa0f700e63985a0abbe29573b3816e1ef1e47 100644 (file)
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
        char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
        raw_spin_lock_irqsave(&oprofilefs_lock, flags);
        *val = simple_strtoul(tmpbuf, NULL, 0);
        raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-       return 0;
+       return count;
 }
 
 
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&value, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(file->private_data, value);
index 3ef44624f5103ddaf405e76fcafd0afe6b27a132..878fba1265829cdab586a145d86a332b5ce32874 100644 (file)
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
        ops->start = oprofile_hrtimer_start;
        ops->stop = oprofile_hrtimer_stop;
        ops->cpu_type = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
        return 0;
 }
 
index b6f9749b4fa769638392610221b0751fba73eabb..f02b5235056d938ac2713c4534ff0bd3d89d7c8f 100644 (file)
@@ -76,6 +76,7 @@ config PCI_IOV
 
 config PCI_PRI
        bool "PCI PRI support"
+       depends on PCI
        select PCI_ATS
        help
          PRI is the PCI Page Request Interface. It allows PCI devices that are
index 7ec56fb0bd78aca5aec916aefd8788cc1b101027..b0dd08e6a9da1cc4f8ee78ebf53f1e4e5f1de2b2 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
index 596172b4ae955802a5778dd0c375b870b9a11909..9ddf69e3bbef03487e12ea814be79bb3258fc0fa 100644 (file)
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
                return AE_OK;
 
+       pdev = pbus->self;
+       if (pdev && pci_is_pcie(pdev)) {
+               tmp = acpi_find_root_bridge_handle(pdev);
+               if (tmp) {
+                       struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+                       if (root && (root->osc_control_set &
+                                       OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+                               return AE_OK;
+               }
+       }
+
        acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
        pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
        if (pdev) {
-               pdev->current_state = PCI_D0;
                slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
                pci_dev_put(pdev);
        }
@@ -1378,11 +1389,13 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
        int *count = (int *)context;
 
-       if (acpi_is_root_bridge(handle)) {
-               acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                               handle_hotplug_event_bridge, NULL);
-                       (*count)++;
-       }
+       if (!acpi_is_root_bridge(handle))
+               return AE_OK;
+
+       (*count)++;
+       acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+                                   handle_hotplug_event_bridge, NULL);
+
        return AE_OK ;
 }
 
index 1e9c9aacc3a6c82363b39602c5f6bab1b4bbd30d..085dbb5fc168be33de77d0ced5c411c807ffc2ef 100644 (file)
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
                goto err_exit;
        }
 
-       /* Wait for 1 second after checking link training status */
-       msleep(1000);
-
        /* Check for a power fault */
        if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
                ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
index 96dc4734e4affcc6c7f763a9853342678df8a7c6..7b1414810ae3e0e0a5ccbcd9bec0f21051cf95db 100644 (file)
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
         else
                 msleep(1000);
 
+       /*
+        * Need to wait for 1000 ms after Data Link Layer Link Active
+        * (DLLLA) bit reads 1b before sending configuration request.
+        * We need it before checking Link Training (LT) bit because
+        * LT is still set even after DLLLA bit is set on some platform.
+        */
+       msleep(1000);
+
        retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
        if (retval) {
                ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
                return retval;
        }
 
+       /*
+        * If the port supports Link speeds greater than 5.0 GT/s, we
+        * must wait for 100 ms after Link training completes before
+        * sending configuration request.
+        */
+       if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
+               msleep(100);
+
+       pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
        return retval;
 }
 
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
        u16 slot_cmd;
        u16 cmd_mask;
        u16 slot_status;
-       u16 lnk_status;
        int retval = 0;
 
        /* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
        ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
                 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 
-       retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
-       if (retval) {
-               ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
-                               __func__);
-               return retval;
-       }
-       pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
-
        return retval;
 }
 
index aca972bbfb4c8ba04e7fc74efd74636ebe49ea65..dd7e0c51a33e5e5f79eb9f11b444fe4667d2ba2d 100644 (file)
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
 
 static int is_shpc_capable(struct pci_dev *dev)
 {
-       if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
-                                               PCI_DEVICE_ID_AMD_GOLAM_7450))
+       if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
                return 1;
        if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
                return 0;
index 36547f0ce305e987c7a414a47d6a4e5806b239cb..75ba2311b54f3f37b62176a37e032c480be9f3f1 100644 (file)
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
        ctrl->pci_dev = pdev;  /* pci_dev of the P2P bridge */
        ctrl_dbg(ctrl, "Hotplug Controller:\n");
 
-       if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
-                               PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+           pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
                /* amd shpc driver doesn't use Base Offset; assume 0 */
                ctrl->mmio_base = pci_resource_start(pdev, 0);
                ctrl->mmio_size = pci_resource_len(pdev, 0);
index b82c155d7b37f539eb85ce28512219a6b701cf52..1969a3ee3058328e469a0fc6e529f9841f5708ab 100644 (file)
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;
+       int bars = 0;
 
        if (!nr_virtfn)
                return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+               bars |= (1 << (i + PCI_IOV_RESOURCES));
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
                return -ENOMEM;
        }
 
+       if (pci_enable_resources(dev, bars)) {
+               dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+               return -ENOMEM;
+       }
+
        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
index 6f45a73c6e9fa38c9e09fbf3d5a4853d8cc3396c..6d4a5319148d7eb293eb499cb98cfc56e496f078 100644 (file)
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
+               /* Fall back to PCI_D0 if native PM is not supported */
+               if (!dev->pm_cap)
+                       dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */
 
-       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       /* only skip the SR-IOV (IOV) resources */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+               if (dev->resource[i].flags & flags)
+                       bars |= (1 << i);
+       for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
 
index a43cfd906c6d678759d241a3fae9363889e1c15d..d93e962f26100c7e0498f80f7dac437c6f0e87f5 100644 (file)
@@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = {
        .update_status  = dell_send_intensity,
 };
 
-static void touchpad_led_on()
+static void touchpad_led_on(void)
 {
        int command = 0x97;
        char data = 1;
        i8042_command(&data, command | 1 << 12);
 }
 
-static void touchpad_led_off()
+static void touchpad_led_off(void)
 {
        int command = 0x97;
        char data = 2;
index 13ef8c37471d0a6575fed59d6cf92dece74c34ba..dcdc1f4a4624d782d35f3776d1a69bbfdf983cdb 100644 (file)
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
        int illumination_supported:1;
        int video_supported:1;
        int fan_supported:1;
+       int system_event_supported:1;
 
        struct mutex mutex;
 };
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
        u32 hci_result;
        u32 value;
 
-       if (!dev->key_event_valid) {
+       if (!dev->key_event_valid && dev->system_event_supported) {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
                if (hci_result == HCI_SUCCESS) {
                        dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        /* enable event fifo */
        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+       if (hci_result == HCI_SUCCESS)
+               dev->system_event_supported = 1;
 
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
        u32 hci_result, value;
+       int retries = 3;
 
-       if (event != 0x80)
+       if (!dev->system_event_supported || event != 0x80)
                return;
+
        do {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
-               if (hci_result == HCI_SUCCESS) {
+               switch (hci_result) {
+               case HCI_SUCCESS:
                        if (value == 0x100)
                                continue;
                        /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
                                pr_info("Unknown key %x\n",
                                       value);
                        }
-               } else if (hci_result == HCI_NOT_SUPPORTED) {
+                       break;
+               case HCI_NOT_SUPPORTED:
                        /* This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
                         * become disabled. */
                        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                        pr_notice("Re-enabled hotkeys\n");
+                       /* fall through */
+               default:
+                       retries--;
+                       break;
                }
-       } while (hci_result != HCI_EMPTY);
+       } while (retries && hci_result != HCI_EMPTY);
 }
 
 
index cffcb7c00b0068da7b32a467358b7e71a0388e13..01fa671ec97f6af9a31bf1a8c39ceef86691617b 100644 (file)
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
 #define PMIC_BATT_CHR_SBATDET_MASK     (1 << 5)
 #define PMIC_BATT_CHR_SDCLMT_MASK      (1 << 6)
 #define PMIC_BATT_CHR_SUSBOVP_MASK     (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK       0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK       0x86
+
 #define PMIC_BATT_ADC_ACCCHRG_MASK     (1 << 31)
 #define PMIC_BATT_ADC_ACCCHRGVAL_MASK  0x7FFFFFFF
 
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
                        pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
                        batt_exception = 1;
-               } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
-                       batt_exception = 1;
                } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        batt_exception = 1;
                } else {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+                       if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+                               /* PMIC will change charging current automatically */
+                               pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+                       }
                }
        }
 
index cf3f9997546dc41d10a143390e834d0926720964..10451a15e8284f33be26996363c5ffd6c79ad56a 100644 (file)
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
index 5225930a10cd24fd5003d553bfd65ada9e10eebc..691b1ab1a3d0499d85815bd03551e9d22518e494 100644 (file)
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
        /* Allocate buffer for inbound doorbells queue */
-       priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
                                IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
                                &priv->idb_dma, GFP_KERNEL);
        if (!priv->idb_base)
                return -ENOMEM;
 
-       memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
        dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
                priv->idb_base, (unsigned long long)priv->idb_dma);
 
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
         */
 
        /* Allocate space for DMA descriptors */
-       bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
                                        &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].bd_phys = bd_phys;
        priv->bdma[chnum].bd_base = bd_ptr;
 
-       memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
 
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                        bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
-       sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].sts_base = sts_ptr;
        priv->bdma[chnum].sts_size = sts_size;
 
-       memset(sts_ptr, 0, sts_size);
-
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
        /* Outbound message descriptor status FIFO allocation */
        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-       priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
                        priv->omsg_ring[mbox].sts_size *
                                                sizeof(struct tsi721_dma_sts),
                        &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
                goto out_desc;
        }
 
-       memset(priv->omsg_ring[mbox].sts_base, 0,
-               entries * sizeof(struct tsi721_dma_sts));
-
        /*
         * Configure Outbound Messaging Engine
         */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        INIT_LIST_HEAD(&mport->dbells);
 
        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
        strcpy(mport->name, "Tsi721 mport");
 
        /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i;
+       int i, cap;
        int err;
        u32 regval;
 
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                        dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
        }
 
-       /* Clear "no snoop" and "relaxed ordering" bits. */
-       pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
-       regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
-       pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+       cap = pci_pcie_cap(pdev);
+       BUG_ON(cap == 0);
+
+       /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+       regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+                   PCI_EXP_DEVCTL_NOSNOOP_EN);
+       regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+       /* Adjust PCIe completion timeout. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+       regval &= ~(0x0f);
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
 
        /*
         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
index 58be4deb1402ad9f2a119c196e208a8b0d2f2593..822e54c394d5cd0690ee80d2c5173eb05140018f 100644 (file)
@@ -72,6 +72,8 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
+#define MAX_READ_REQUEST_SZ_SHIFT      12
+
 /*
  * Event Management Registers
  */
index 5abeb3ac3e8da43df3d58bf677c3ac0e3d17116e..298c6c6a279574561eb24f329b115fe134d274a1 100644 (file)
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
                        break;
        }
 
-       if (!ri)
+       if (i == ARRAY_SIZE(aat2870_regulators))
                return NULL;
 
        ri->enable_addr = AAT2870_LDO_EN;
index 669d0216022195e36bcb8f59b25cf14de1dff6c9..938398f3e869c57814fc130b9133ac435118214e 100644 (file)
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
        list_del(&rdev->list);
        if (rdev->supply)
                regulator_put(rdev->supply);
-       device_unregister(&rdev->dev);
        kfree(rdev->constraints);
+       device_unregister(&rdev->dev);
        mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
index 66d2d60b436a3e7a28092d64a499fd203db764a5..b552aae55b417c7cd9aa97fba33bd61ad0b7e787 100644 (file)
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
 
        switch (id) {
        case TPS65910_REG_VDD1:
-               dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
                if (dcdc_mult == 1)
                        dcdc_mult--;
-               vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+               vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
 
                tps65910_modify_bits(pmic, TPS65910_VDD1,
                                (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
                tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
                break;
        case TPS65910_REG_VDD2:
-               dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
                if (dcdc_mult == 1)
                        dcdc_mult--;
-               vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+               vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
 
                tps65910_modify_bits(pmic, TPS65910_VDD2,
                                (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
        switch (id) {
        case TPS65910_REG_VDD1:
        case TPS65910_REG_VDD2:
-               mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
                volt = VDD1_2_MIN_VOLT +
-                               (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+                               (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
                break;
        case TPS65911_REG_VDDCTRL:
                volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
 
                if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
                        pmic->desc[i].ops = &tps65910_ops_dcdc;
+                       pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
+                                                       VDD1_2_NUM_VOLT_COARSE;
                } else if (i == TPS65910_REG_VDD3) {
                        if (tps65910_chip_id(tps65910) == TPS65910)
                                pmic->desc[i].ops = &tps65910_ops_vdd3;
index ee8747f4fa08b187ef2f79dbb51e3f69a770bc21..11cc308d66e925db83fa8c50697e734608aac1b8 100644 (file)
@@ -71,6 +71,7 @@ struct twlreg_info {
 #define VREG_TYPE              1
 #define VREG_REMAP             2
 #define VREG_DEDICATED         3       /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
 /* TWL6030 register offsets */
 #define VREG_TRANS             1
 #define VREG_STATE             2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
        .get_status     = twl4030reg_get_status,
 };
 
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+                       unsigned *selector)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+       int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+       twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+               vsel);
+       return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+       int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+               VREG_VOLTAGE_SMPS_4030);
+
+       return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+       .set_voltage    = twl4030smps_set_voltage,
+       .get_voltage    = twl4030smps_get_voltage,
+};
+
 static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
                }, \
        }
 
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+       { \
+       .base = offset, \
+       .id = num, \
+       .delay = turnon_delay, \
+       .remap = remap_conf, \
+       .desc = { \
+               .name = #label, \
+               .id = TWL4030_REG_##label, \
+               .ops = &twl4030smps_ops, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
 #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
        .base = offset, \
        .min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
        TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
        TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
        TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
-       TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
-       TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+       TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+       TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
        TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
        TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
        TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
index e8326f26fa2f5c5ca11d26901f5e70052a1ffea6..dc4c2748bbc38bfac593cc47a2ff7bac6a34c8fe 100644 (file)
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
         */
        delta = timespec_sub(old_system, old_rtc);
        delta_delta = timespec_sub(delta, old_delta);
-       if (abs(delta_delta.tv_sec)  >= 2) {
+       if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occured and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
        rtc_tm_to_time(&tm, &new_rtc.tv_sec);
        new_rtc.tv_nsec = 0;
 
-       if (new_rtc.tv_sec <= old_rtc.tv_sec) {
-               if (new_rtc.tv_sec < old_rtc.tv_sec)
-                       pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+       if (new_rtc.tv_sec < old_rtc.tv_sec) {
+               pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
                return 0;
        }
 
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
        sleep_time = timespec_sub(sleep_time,
                        timespec_sub(new_system, old_system));
 
-       timekeeping_inject_sleeptime(&sleep_time);
+       if (sleep_time.tv_sec >= 0)
+               timekeeping_inject_sleeptime(&sleep_time);
        return 0;
 }
 
index eda128fc1d38729ebb1a385c10fd30e6345e852b..64aedd8cc095810e4134ceb727538a24b2af6232 100644 (file)
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 static struct rtc_class_ops m41t80_rtc_ops = {
        .read_time = m41t80_rtc_read_time,
        .set_time = m41t80_rtc_set_time,
+       /*
+        * XXX - m41t80 alarm functionality is reported broken.
+        * until it is fixed, don't register alarm functions.
+        *
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
+       */
        .proc = m41t80_rtc_proc,
+       /*
+        * See above comment on broken alarm
+        *
        .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+       */
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index b3eba3cddd42658f76cb7ec84a3c13187993d1b4..e4b6880aabd05492b2a08c0511f400340b9fc0fa 100644 (file)
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
        }
 }
 
-static int puv3_rtc_remove(struct platform_device *dev)
+static int __devexit puv3_rtc_remove(struct platform_device *dev)
 {
        struct rtc_device *rtc = platform_get_drvdata(dev);
 
@@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
        return 0;
 }
 
-static int puv3_rtc_probe(struct platform_device *pdev)
+static int __devinit puv3_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
        struct resource *res;
index 7639ab906f02e35ceec0a75bfaddd418e17478e8..5b979d9cc3324ffccd455da21e09a35b88f753ad 100644 (file)
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       clk_enable(rtc_clk);
        pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
                 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       clk_enable(rtc_clk);
        writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
        writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
        writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
index 43068fbd0baacfe8beb7d353cda35a8c06c371e2..1b6d9247fdc78a4237d5e7048347a0a66875d140 100644 (file)
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
 
        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return -ENODATA;
+       if (OLDMEM_BASE)
+               return -ENODATA;
 
        zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
        debug_register_view(zcore_dbf, &debug_sprintf_view);
index 75c3f1f8fd434301c3ba4a07a632e0ffefa6aac3..a84631a7391d3ed50c680b939129d159fc25daa3 100644 (file)
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
        struct channel_path *chp = chpid_to_chp(chpid);
-       struct chp_link link;
 
-       memset(&link, 0, sizeof(struct chp_link));
-       link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
                /* Try to update the channel path descritor. */
                chsc_determine_base_channel_path_desc(chpid, &chp->desc);
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-                                          __s390_vary_chpid_on, &link);
+                                          __s390_vary_chpid_on, &chpid);
        } else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-                                          NULL, &link);
+                                          NULL, &chpid);
 
        return 0;
 }
index 155a82bcb9e545e2888430337d0b1c2b50239acd..4a1ff5c2eb881355204ffe8e047cfbe3bb8e3706 100644 (file)
@@ -68,8 +68,13 @@ struct schib {
        __u8 mda[4];             /* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
 enum sch_todo {
        SCH_TODO_NOTHING,
+       SCH_TODO_EVAL,
        SCH_TODO_UNREG,
 };
 
index 92d7324acb1c78fbab348a2ea190c8901351df9d..21908e67bf6745d8dc91f791347a9d9cee538aa1 100644 (file)
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-       struct subchannel *sch;
-       enum sch_todo todo;
-
-       sch = container_of(work, struct subchannel, todo_work);
-       /* Find out todo. */
-       spin_lock_irq(sch->lock);
-       todo = sch->todo;
-       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-                     sch->schid.sch_no, todo);
-       sch->todo = SCH_TODO_NOTHING;
-       spin_unlock_irq(sch->lock);
-       /* Perform todo. */
-       if (todo == SCH_TODO_UNREG)
-               css_sch_device_unregister(sch);
-       /* Release workqueue ref. */
-       put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-                     sch->schid.ssid, sch->schid.sch_no, todo);
-       if (sch->todo >= todo)
-               return;
-       /* Get workqueue ref. */
-       if (!get_device(&sch->dev))
-               return;
-       sch->todo = todo;
-       if (!queue_work(cio_work_q, &sch->todo_work)) {
-               /* Already queued, release workqueue ref. */
-               put_device(&sch->dev);
-       }
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
        int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
                css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+                     sch->schid.ssid, sch->schid.sch_no, todo);
+       if (sch->todo >= todo)
+               return;
+       /* Get workqueue ref. */
+       if (!get_device(&sch->dev))
+               return;
+       sch->todo = todo;
+       if (!queue_work(cio_work_q, &sch->todo_work)) {
+               /* Already queued, release workqueue ref. */
+               put_device(&sch->dev);
+       }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+       struct subchannel *sch;
+       enum sch_todo todo;
+       int ret;
+
+       sch = container_of(work, struct subchannel, todo_work);
+       /* Find out todo. */
+       spin_lock_irq(sch->lock);
+       todo = sch->todo;
+       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+                     sch->schid.sch_no, todo);
+       sch->todo = SCH_TODO_NOTHING;
+       spin_unlock_irq(sch->lock);
+       /* Perform todo. */
+       switch (todo) {
+       case SCH_TODO_NOTHING:
+               break;
+       case SCH_TODO_EVAL:
+               ret = css_evaluate_known_subchannel(sch, 1);
+               if (ret == -EAGAIN) {
+                       spin_lock_irq(sch->lock);
+                       css_sched_sch_todo(sch, todo);
+                       spin_unlock_irq(sch->lock);
+               }
+               break;
+       case SCH_TODO_UNREG:
+               css_sch_device_unregister(sch);
+               break;
+       }
+       /* Release workqueue ref. */
+       put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
index d734f4a0ecac23cea1d821b316a563b71087ccc6..47269858ecb662af862c38a5e96fc4f9aacfe2ca 100644 (file)
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
         */
        cdev->private->flags.resuming = 1;
        cdev->private->path_new_mask = LPM_ANYPATH;
-       css_schedule_eval(sch->schid);
+       css_sched_sch_todo(sch, SCH_TODO_EVAL);
        spin_unlock_irq(sch->lock);
-       css_complete_work();
+       css_wait_for_slow_path();
 
        /* cdev may have been moved to a different subchannel. */
        sch = to_subchannel(cdev->dev.parent);
index 52c233fa2b1281d14a2881618465606447bef365..1b853513c891ca2f010f8703b110f49c6f65afb3 100644 (file)
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
        cdev->private->pgid_reset_mask = 0;
 }
 
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+       memset(irb, 0, sizeof(*irb));
+       if (type == FAKE_CMD_IRB) {
+               struct cmd_scsw *scsw = &irb->scsw.cmd;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       } else if (type == FAKE_TM_IRB) {
+               struct tm_scsw *scsw = &irb->scsw.tm;
+               scsw->x = 1;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
        struct subchannel *sch;
 
@@ -520,12 +538,8 @@ callback:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
-                       memset(&cdev->private->irb, 0, sizeof(struct irb));
-                       cdev->private->irb.scsw.cmd.cc = 1;
-                       cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
-                       cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
-                       cdev->private->irb.scsw.cmd.stctl =
-                               SCSW_STCTL_STATUS_PEND;
+                       create_fake_irb(&cdev->private->irb,
+                                       cdev->private->flags.fake_irb);
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
index f98698d5735e887e0fb6cc46f00a63012ecdccb5..ec7fb6d3b479a25a32bfad67ecc36a3539782b39 100644 (file)
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
-                       cdev->private->flags.fake_irb = 1;
+                       cdev->private->flags.fake_irb = FAKE_CMD_IRB;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
        sch = to_subchannel(cdev->dev.parent);
        if (!sch->schib.pmcw.ena)
                return -EINVAL;
+       if (cdev->private->state == DEV_STATE_VERIFY) {
+               /* Remember to fake irb when finished. */
+               if (!cdev->private->flags.fake_irb) {
+                       cdev->private->flags.fake_irb = FAKE_TM_IRB;
+                       cdev->private->intparm = intparm;
+                       return 0;
+               } else
+                       /* There's already a fake I/O around. */
+                       return -EBUSY;
+       }
        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EIO;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
index 2ebb492a5c17dcb8a9e05fb23209162edb43d849..76253dfcc1be86a18eba7ac7ea4d6f53c134bc7e 100644 (file)
@@ -111,6 +111,9 @@ enum cdev_todo {
        CDEV_TODO_UNREG_EVAL,
 };
 
+#define FAKE_CMD_IRB   1
+#define FAKE_TM_IRB    2
+
 struct ccw_device_private {
        struct ccw_device *cdev;
        struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
                unsigned int doverify:1;    /* delayed path verification */
                unsigned int donotify:1;    /* call notify function */
                unsigned int recog_done:1;  /* dev. recog. complete */
-               unsigned int fake_irb:1;    /* deliver faked irb */
+               unsigned int fake_irb:2;    /* deliver faked irb */
                unsigned int resuming:1;    /* recognition while resume */
                unsigned int pgroup:1;      /* pathgroup is set up */
                unsigned int mpath:1;       /* multipathing is set up */
index b77ae519d79c4eea8c6d87a1e08609dc70ae0276..96bbe9d12a79fbef17fa9b4df331adf5a24a96c3 100644 (file)
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
 }
 
 /**
- * ap_schedule_poll_timer(): Schedule poll timer.
+ * __ap_schedule_poll_timer(): Schedule poll timer.
  *
  * Set up the timer to run the poll tasklet
  */
-static inline void ap_schedule_poll_timer(void)
+static inline void __ap_schedule_poll_timer(void)
 {
        ktime_t hr_time;
 
        spin_lock_bh(&ap_poll_timer_lock);
-       if (ap_using_interrupts() || ap_suspend_flag)
-               goto out;
-       if (hrtimer_is_queued(&ap_poll_timer))
+       if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
                goto out;
        if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
                hr_time = ktime_set(0, poll_timeout);
@@ -1293,6 +1291,18 @@ out:
        spin_unlock_bh(&ap_poll_timer_lock);
 }
 
+/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+       if (ap_using_interrupts())
+               return;
+       __ap_schedule_poll_timer();
+}
+
 /**
  * ap_poll_read(): Receive pending reply messages from an AP device.
  * @ap_dev: pointer to the AP device
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
                        *flags |= 1;
                *flags |= 2;
                break;
-       case AP_RESPONSE_Q_FULL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
+               __ap_schedule_poll_timer();
+       case AP_RESPONSE_Q_FULL:
                *flags |= 2;
                break;
        case AP_RESPONSE_MESSAGE_TOO_BIG:
@@ -1541,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
        rc = ap_init_queue(ap_dev->qid);
        if (rc == -ENODEV)
                ap_dev->unregistered = 1;
+       else
+               __ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
index 94f49ffa70ba22e5e27508c93d139a2b85983ab2..8af868bab20b52f9e2c11d4266a977c79ff99c01 100644 (file)
@@ -263,6 +263,11 @@ error:
        return PTR_ERR(vqs[i]);
 }
 
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+       return "";
+}
+
 /*
  * The config ops structure as defined by virtio config
  */
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
        .reset = kvm_reset,
        .find_vqs = kvm_find_vqs,
        .del_vqs = kvm_del_vqs,
+       .bus_name = kvm_bus_name,
 };
 
 /*
index fa80ba1f0344318a30697cf09af4a44cb135f857..9b66d2d1809b30647c2db82c88ba62306f2061b3 100644 (file)
@@ -4,7 +4,7 @@ menu "S/390 network device drivers"
 config LCS
        def_tristate m
        prompt "Lan Channel Station Interface"
-       depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
+       depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
        help
           Select this option if you want to use LCS networking on IBM System z.
           This device driver supports Token Ring (IEEE 802.5),
index c28713da1ec5d380f9e904bab6f2946016cb76ec..863fc2197155c36c0f956cd9d3bb7781d8b46778 100644 (file)
@@ -50,7 +50,7 @@
 #include "lcs.h"
 
 
-#if !defined(CONFIG_NET_ETHERNET) && \
+#if !defined(CONFIG_ETHERNET) && \
     !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
 #error Cannot compile lcs.c without some net devices switched on.
 #endif
@@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card)
        int rc;
 
        LCS_DBF_TEXT(2, trace, "strtauto");
-#ifdef CONFIG_NET_ETHERNET
+#ifdef CONFIG_ETHERNET
        card->lan_type = LCS_FRAME_TYPE_ENET;
        rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
        if (rc == 0)
@@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
                goto netdev_out;
        }
        switch (card->lan_type) {
-#ifdef CONFIG_NET_ETHERNET
+#ifdef CONFIG_ETHERNET
        case LCS_FRAME_TYPE_ENET:
                card->lan_type_trans = eth_type_trans;
                dev = alloc_etherdev(0);
index 3251333a23df18b6e8f378f0f681ac818145f0e1..8160591913f95bd7555ac70129bbd06ef9ea0b60 100644 (file)
@@ -63,6 +63,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/ebcdic.h>
 
 #include <net/iucv/iucv.h>
 #include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  * Debug Facility stuff
  */
 #define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
 #define IUCV_DBF_SETUP_PAGES 2
 #define IUCV_DBF_SETUP_NR_AREAS 1
 #define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
        struct net_device         *netdev;
        struct connection_profile prof;
        char                      userid[9];
+       char                      userdata[17];
 };
 
 /**
@@ -263,7 +265,7 @@ struct ll_header {
 };
 
 #define NETIUCV_HDRLEN          (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX      32768
+#define NETIUCV_BUFSIZE_MAX     65537
 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 #define NETIUCV_MTU_DEFAULT      9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
        return test_and_set_bit(0, &priv->tbusy);
 }
 
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+       0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+       0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 };
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
  *
  * @returns The printable string (static data!!)
  */
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
 {
-       static char tmp[9];
+       static char tmp[17];
        char *p = tmp;
-       memcpy(tmp, name, 8);
-       tmp[8] = '\0';
-       while (*p && (!isspace(*p)))
+       memcpy(tmp, name, len);
+       tmp[len] = '\0';
+       while (*p && ((p - tmp) < len) && (!isspace(*p)))
                p++;
        *p = '\0';
        return tmp;
 }
 
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+       static char tmp_uid[9];
+       static char tmp_udat[17];
+       static char buf[100];
+
+       if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+               tmp_uid[8] = '\0';
+               tmp_udat[16] = '\0';
+               memcpy(tmp_uid, conn->userid, 8);
+               memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+               memcpy(tmp_udat, conn->userdata, 16);
+               EBCASC(tmp_udat, 16);
+               memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+               sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+               return buf;
+       } else
+               return netiucv_printname(conn->userid, 8);
+}
+
 /**
  * States of the interface statemachine.
  */
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
 {
        struct iucv_connection *conn = path->private;
        struct iucv_event ev;
+       static char tmp_user[9];
+       static char tmp_udat[17];
        int rc;
 
-       if (memcmp(iucvMagic, ipuser, 16))
-               /* ipuser must match iucvMagic. */
-               return -EINVAL;
        rc = -EINVAL;
+       memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+       memcpy(tmp_udat, ipuser, 16);
+       EBCASC(tmp_udat, 16);
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(conn, &iucv_connection_list, list) {
-               if (strncmp(ipvmid, conn->userid, 8))
+               if (strncmp(ipvmid, conn->userid, 8) ||
+                   strncmp(ipuser, conn->userdata, 16))
                        continue;
                /* Found a matching connection for this path. */
                conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
                fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
                rc = 0;
        }
+       IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+                      tmp_user, netiucv_printname(tmp_udat, 16));
        read_unlock_bh(&iucv_connection_rwlock);
        return rc;
 }
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
        conn->path = path;
        path->msglim = NETIUCV_QUEUELEN_DEFAULT;
        path->flags = 0;
-       rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+       rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
        if (rc) {
                IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
                return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 
        IUCV_DBF_TEXT(trace, 3, __func__);
        fsm_deltimer(&conn->timer);
-       iucv_path_sever(conn->path, NULL);
+       iucv_path_sever(conn->path, conn->userdata);
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 }
 
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
        IUCV_DBF_TEXT(trace, 3, __func__);
 
        fsm_deltimer(&conn->timer);
-       iucv_path_sever(conn->path, NULL);
-       dev_info(privptr->dev, "The peer interface of the IUCV device"
-               " has closed the connection\n");
+       iucv_path_sever(conn->path, conn->userdata);
+       dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+                              "connection\n", netiucv_printuser(conn));
        IUCV_DBF_TEXT(data, 2,
                      "conn_action_connsever: Remote dropped connection\n");
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
        IUCV_DBF_TEXT(trace, 3, __func__);
 
        fsm_newstate(fi, CONN_STATE_STARTWAIT);
-       IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
-               netdev->name, conn->userid);
 
        /*
         * We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 
        fsm_newstate(fi, CONN_STATE_SETUPWAIT);
        conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+       IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+               netdev->name, netiucv_printuser(conn));
+
        rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
-                              NULL, iucvMagic, conn);
+                              NULL, conn->userdata, conn);
        switch (rc) {
        case 0:
                netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
        case 11:
                dev_warn(privptr->dev,
                        "The IUCV device failed to connect to z/VM guest %s\n",
-                       netiucv_printname(conn->userid));
+                       netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_STARTWAIT);
                break;
        case 12:
                dev_warn(privptr->dev,
                        "The IUCV device failed to connect to the peer on z/VM"
-                       " guest %s\n", netiucv_printname(conn->userid));
+                       " guest %s\n", netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_STARTWAIT);
                break;
        case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
                dev_err(privptr->dev,
                        "z/VM guest %s has too many IUCV connections"
                        " to connect with the IUCV device\n",
-                       netiucv_printname(conn->userid));
+                       netiucv_printname(conn->userid, 8));
                fsm_newstate(fi, CONN_STATE_CONNERR);
                break;
        case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
        netiucv_purge_skb_queue(&conn->collect_queue);
        if (conn->path) {
                IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
-               iucv_path_sever(conn->path, iucvMagic);
+               iucv_path_sever(conn->path, conn->userdata);
                kfree(conn->path);
                conn->path = NULL;
        }
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
                        fsm_newstate(fi, DEV_STATE_RUNNING);
                        dev_info(privptr->dev,
                                "The IUCV device has been connected"
-                               " successfully to %s\n", privptr->conn->userid);
+                               " successfully to %s\n",
+                               netiucv_printuser(privptr->conn));
                        IUCV_DBF_TEXT(setup, 3,
                                "connection is up and running\n");
                        break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
        struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
-       return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+       return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
 }
 
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+                             char *userdata)
 {
-       struct netiucv_priv *priv = dev_get_drvdata(dev);
-       struct net_device *ndev = priv->conn->netdev;
-       char    *p;
-       char    *tmp;
-       char    username[9];
-       int     i;
-       struct iucv_connection *cp;
+       const char *p;
+       int i;
 
-       IUCV_DBF_TEXT(trace, 3, __func__);
-       if (count > 9) {
-               IUCV_DBF_TEXT_(setup, 2,
-                              "%d is length of username\n", (int) count);
+       p = strchr(buf, '.');
+       if ((p && ((count > 26) ||
+                  ((p - buf) > 8) ||
+                  (buf + count - p > 18))) ||
+           (!p && (count > 9))) {
+               IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
                return -EINVAL;
        }
 
-       tmp = strsep((char **) &buf, "\n");
-       for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
-               if (isalnum(*p) || (*p == '$')) {
-                       username[i]= toupper(*p);
+       for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+               if (isalnum(*p) || *p == '$') {
+                       username[i] = toupper(*p);
                        continue;
                }
-               if (*p == '\n') {
+               if (*p == '\n')
                        /* trailing lf, grr */
                        break;
-               }
                IUCV_DBF_TEXT_(setup, 2,
-                              "username: invalid character %c\n", *p);
+                              "conn_write: invalid character %02x\n", *p);
                return -EINVAL;
        }
        while (i < 8)
                username[i++] = ' ';
        username[8] = '\0';
 
+       if (*p == '.') {
+               p++;
+               for (i = 0; i < 16 && *p; i++, p++) {
+                       if (*p == '\n')
+                               break;
+                       userdata[i] = toupper(*p);
+               }
+               while (i > 0 && i < 16)
+                       userdata[i++] = ' ';
+       } else
+               memcpy(userdata, iucvMagic_ascii, 16);
+       userdata[16] = '\0';
+       ASCEBC(userdata, 16);
+
+       return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->conn->netdev;
+       char    username[9];
+       char    userdata[17];
+       int     rc;
+       struct iucv_connection *cp;
+
+       IUCV_DBF_TEXT(trace, 3, __func__);
+       rc = netiucv_check_user(buf, count, username, userdata);
+       if (rc)
+               return rc;
+
        if (memcmp(username, priv->conn->userid, 9) &&
            (ndev->flags & (IFF_UP | IFF_RUNNING))) {
                /* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
        }
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(cp, &iucv_connection_list, list) {
-               if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+               if (!strncmp(username, cp->userid, 9) &&
+                  !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
                        read_unlock_bh(&iucv_connection_rwlock);
-                       IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
-                               "to %s already exists\n", username);
+                       IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+                               "already exists\n", netiucv_printuser(cp));
                        return -EEXIST;
                }
        }
        read_unlock_bh(&iucv_connection_rwlock);
        memcpy(priv->conn->userid, username, 9);
+       memcpy(priv->conn->userdata, userdata, 17);
        return count;
 }
 
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
        bs1 = simple_strtoul(buf, &e, 0);
 
        if (e && (!isspace(*e))) {
-               IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+               IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+                       *e);
                return -EINVAL;
        }
        if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
  * Add it to the list of netiucv connections;
  */
 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
-                                                     char *username)
+                                                     char *username,
+                                                     char *userdata)
 {
        struct iucv_connection *conn;
 
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
        fsm_settimer(conn->fsm, &conn->timer);
        fsm_newstate(conn->fsm, CONN_STATE_INVALID);
 
+       if (userdata)
+               memcpy(conn->userdata, userdata, 17);
        if (username) {
                memcpy(conn->userid, username, 9);
                fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
  */
 static void netiucv_remove_connection(struct iucv_connection *conn)
 {
+
        IUCV_DBF_TEXT(trace, 3, __func__);
        write_lock_bh(&iucv_connection_rwlock);
        list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
        fsm_deltimer(&conn->timer);
        netiucv_purge_skb_queue(&conn->collect_queue);
        if (conn->path) {
-               iucv_path_sever(conn->path, iucvMagic);
+               iucv_path_sever(conn->path, conn->userdata);
                kfree(conn->path);
                conn->path = NULL;
        }
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
 /**
  * Allocate and initialize everything of a net device.
  */
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 {
        struct netiucv_priv *privptr;
        struct net_device *dev;
@@ -1994,6 +2062,8 @@ static struct net_device *netiucv_init_netdevice(char *username)
                           netiucv_setup_netdevice);
        if (!dev)
                return NULL;
+       if (dev_alloc_name(dev, dev->name) < 0)
+               goto out_netdev;
 
        privptr = netdev_priv(dev);
        privptr->fsm = init_fsm("netiucvdev", dev_state_names,
@@ -2002,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
        if (!privptr->fsm)
                goto out_netdev;
 
-       privptr->conn = netiucv_new_connection(dev, username);
+       privptr->conn = netiucv_new_connection(dev, username, userdata);
        if (!privptr->conn) {
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
                goto out_fsm;
@@ -2020,47 +2090,31 @@ out_netdev:
 static ssize_t conn_write(struct device_driver *drv,
                          const char *buf, size_t count)
 {
-       const char *p;
        char username[9];
-       int i, rc;
+       char userdata[17];
+       int rc;
        struct net_device *dev;
        struct netiucv_priv *priv;
        struct iucv_connection *cp;
 
        IUCV_DBF_TEXT(trace, 3, __func__);
-       if (count>9) {
-               IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
-               return -EINVAL;
-       }
-
-       for (i = 0, p = buf; i < 8 && *p; i++, p++) {
-               if (isalnum(*p) || *p == '$') {
-                       username[i] = toupper(*p);
-                       continue;
-               }
-               if (*p == '\n')
-                       /* trailing lf, grr */
-                       break;
-               IUCV_DBF_TEXT_(setup, 2,
-                              "conn_write: invalid character %c\n", *p);
-               return -EINVAL;
-       }
-       while (i < 8)
-               username[i++] = ' ';
-       username[8] = '\0';
+       rc = netiucv_check_user(buf, count, username, userdata);
+       if (rc)
+               return rc;
 
        read_lock_bh(&iucv_connection_rwlock);
        list_for_each_entry(cp, &iucv_connection_list, list) {
-               if (!strncmp(username, cp->userid, 9)) {
+               if (!strncmp(username, cp->userid, 9) &&
+                   !strncmp(userdata, cp->userdata, 17)) {
                        read_unlock_bh(&iucv_connection_rwlock);
-                       IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
-                               "to %s already exists\n", username);
+                       IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+                               "already exists\n", netiucv_printuser(cp));
                        return -EEXIST;
                }
        }
        read_unlock_bh(&iucv_connection_rwlock);
 
-       dev = netiucv_init_netdevice(username);
+       dev = netiucv_init_netdevice(username, userdata);
        if (!dev) {
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
                return -ENODEV;
@@ -2081,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
        if (rc)
                goto out_unreg;
 
-       dev_info(priv->dev, "The IUCV interface to %s has been"
-               " established successfully\n", netiucv_printname(username));
+       dev_info(priv->dev, "The IUCV interface to %s has been established "
+                           "successfully\n",
+               netiucv_printuser(priv->conn));
 
        return count;
 
index b77c65ed13812f9d7e5462c0bcb5bcb9c1fa33f1..4abc79d3963f86ee2dc7dfd14c1fb240bf2eb540 100644 (file)
@@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_IN_BUF_COUNT_MAX 128
 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
-               ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \
-                ((card)->qdio.in_buf_pool.buf_count / 2))
+                ((card)->qdio.in_buf_pool.buf_count / 2)
 
 /* buffers we have to be behind before we get a PCI */
 #define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
index 81534437373a3c1209c914db4fb2a32d45860e44..4fae1dc19951306a25ffa437e43d95f3f4dba72a 100644 (file)
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
 static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
        int bidx, int forced_cleanup)
 {
+       if (q->card->options.cq != QETH_CQ_ENABLED)
+               return;
+
        if (q->bufs[bidx]->next_pending != NULL) {
                struct qeth_qdio_out_buffer *head = q->bufs[bidx];
                struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
 
                }
        }
+       if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+                                       QETH_QDIO_BUF_HANDLED_DELAYED)) {
+               /* for recovery situations */
+               q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+               qeth_init_qdio_out_buf(q, bidx);
+               QETH_CARD_TEXT(q->card, 2, "clprecov");
+       }
 }
 
 
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
                notification = TX_NOTIFY_OK;
        } else {
                BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
                atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
                notification = TX_NOTIFY_DELAYED_OK;
        }
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
 
        buffer->aob = NULL;
        qeth_clear_output_buffer(buffer->q, buffer,
-                               QETH_QDIO_BUF_HANDLED_DELAYED);
+                                QETH_QDIO_BUF_HANDLED_DELAYED);
+
        /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
@@ -881,7 +891,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
 void qeth_schedule_recovery(struct qeth_card *card)
 {
        QETH_CARD_TEXT(card, 2, "startrec");
-       WARN_ON(1);
        if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
                schedule_work(&card->kernel_thread_starter);
 }
@@ -1114,11 +1123,25 @@ out:
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
        struct sk_buff *skb;
+       struct iucv_sock *iucv;
+       int notify_general_error = 0;
+
+       if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+               notify_general_error = 1;
+
+       /* release may never happen from within CQ tasklet scope */
+       BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
        skb = skb_dequeue(&buf->skb_list);
        while (skb) {
                QETH_CARD_TEXT(buf->q->card, 5, "skbr");
                QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+               if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+                       if (skb->sk) {
+                               iucv = iucv_sk(skb->sk);
+                               iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+                       }
+               }
                atomic_dec(&skb->users);
                dev_kfree_skb_any(skb);
                skb = skb_dequeue(&buf->skb_list);
@@ -1161,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (!q->bufs[j])
                        continue;
-               qeth_cleanup_handled_pending(q, j, free);
+               qeth_cleanup_handled_pending(q, j, 1);
                qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
                if (free) {
                        kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1208,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
        qeth_free_cq(card);
        cancel_delayed_work_sync(&card->buffer_reclaim_work);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-               kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+               dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
        kfree(card->qdio.in_q);
        card->qdio.in_q = NULL;
        /* inbound buffer pool */
@@ -1330,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
 
 static void qeth_start_kernel_thread(struct work_struct *work)
 {
+       struct task_struct *ts;
        struct qeth_card *card = container_of(work, struct qeth_card,
                                        kernel_thread_starter);
        QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1337,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
        if (card->read.state != CH_STATE_UP &&
            card->write.state != CH_STATE_UP)
                return;
-       if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
-               kthread_run(card->discipline.recover, (void *) card,
+       if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+               ts = kthread_run(card->discipline.recover, (void *)card,
                                "qeth_recover");
+               if (IS_ERR(ts)) {
+                       qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+                       qeth_clear_thread_running_bit(card,
+                               QETH_RECOVER_THREAD);
+               }
+       }
 }
 
 static int qeth_setup_card(struct qeth_card *card)
index a21ae3d549db1bce13b124aea664b14ac8bef525..c1296713311404469026efc04e6e34081d812552 100644 (file)
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
        spin_unlock_bh(&card->vlanlock);
 }
 
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
-               return;
+               return 0;
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "aidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "aidREC");
-               return;
+               return 0;
        }
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
+       } else {
+               return -ENOMEM;
        }
+       return 0;
 }
 
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "kidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_bh(&card->vlanlock);
        list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
+       return 0;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
 static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       qeth_set_allowed_threads(card, 0, 1);
        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
index e4c1176ee25b2fd9942cfad447826611bbc7037f..9648e4e68337d35a05fd12cf997d27411033d493 100644 (file)
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
        qeth_l3_free_vlan_addresses6(card, vid);
 }
 
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
 
        set_bit(vid, card->active_vlans);
-       return;
+       return 0;
 }
 
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_irqsave(&card->vlanlock, flags);
        /* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, card->active_vlans);
        spin_unlock_irqrestore(&card->vlanlock, flags);
        qeth_l3_set_multicast_list(card->dev);
+       return 0;
 }
 
 static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2756,11 +2757,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        struct neighbour *n = NULL;
        struct dst_entry *dst;
 
+       rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (n) {
                cast_type = n->type;
+               rcu_read_unlock();
                if ((cast_type == RTN_BROADCAST) ||
                    (cast_type == RTN_MULTICAST) ||
                    (cast_type == RTN_ANYCAST))
@@ -2768,6 +2771,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
                else
                        return RTN_UNSPEC;
        }
+       rcu_read_unlock();
+
        /* try something else */
        if (skb->protocol == ETH_P_IPV6)
                return (skb_network_header(skb)[24] == 0xff) ?
@@ -2847,9 +2852,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        }
 
        hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+
+       rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (ipv == 4) {
                /* IPv4 */
                hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -2893,6 +2900,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                                QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
                }
        }
+       rcu_read_unlock();
 }
 
 static inline void qeth_l3_hdr_csum(struct qeth_card *card,
@@ -3202,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
        return 0;
 }
 
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
 
@@ -3216,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
        u32 changed = dev->features ^ features;
@@ -3482,14 +3492,13 @@ contin:
        else
                netif_carrier_off(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
+               rtnl_lock();
                if (recovery_mode)
                        __qeth_l3_open(card->dev);
-               else {
-                       rtnl_lock();
+               else
                        dev_open(card->dev);
-                       rtnl_unlock();
-               }
                qeth_l3_set_multicast_list(card->dev);
+               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3535,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
                card->info.hwtrap = 1;
        }
        qeth_l3_stop_card(card, recovery_mode);
+       if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+               rtnl_lock();
+               call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+               rtnl_unlock();
+       }
        rc  = ccw_device_set_offline(CARD_DDEV(card));
        rc2 = ccw_device_set_offline(CARD_WDEV(card));
        rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3589,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       qeth_set_allowed_threads(card, 0, 1);
        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
index 0ea2fbfe0e993a42932221d52f0040aaf52b96db..d979bb26522ff8348df72caa8d2c5871deb7f5b4 100644 (file)
@@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
                                        QETH_IN_BUF_COUNT_MAX)
                                qeth_realloc_buffer_pool(card,
                                        QETH_IN_BUF_COUNT_MAX);
-                       break;
                } else
                        rc = -EPERM;
-       default:   /* fall through */
+               break;
+       default:
                rc = -EINVAL;
        }
 out:
index 11f07f888223d92748e684b4000155b7e37eae9c..b79576b64f451e72352bbc3c92582f23e9a92191 100644 (file)
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+       /* if previous slave_alloc returned early, there is nothing to do */
+       if (!zfcp_sdev->port)
+               return;
+
        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
 }
index 5f94d22c491ecbf1c619e79b0e1ff8a75e23ccd4..542668292900fa9fe2e5afca92215f46104e0520 100644 (file)
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_writeb(client, *buf, off);
-
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_writeb(client, *buf, off);
+               if (ret < 0)
                        break;
-               }
-
                len--;
                buf++;
                off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_readb(client, buf, off);
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_readb(client, buf, off);
+               if (ret < 0)
                        break;
-               }
                len--;
                buf++;
                off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
        .remove         = __devexit_p(bbc_i2c_remove),
 };
 
-static int __init bbc_i2c_init(void)
-{
-       return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-       platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
 
 MODULE_LICENSE("GPL");
index 965a1fccd66a8898270d45756468e868a4f8acc5..4b9939726c342f3b5e32b1cc5f3ebfe5c88ef945 100644 (file)
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
        .remove         = __devexit_p(d7s_remove),
 };
 
-static int __init d7s_init(void)
-{
-       return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-       platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
index be7b4e56154f9da35b81bee8fcbc2a238180f314..339fd6f65eda7ff3ba3d90914e8b79c73ae15d34 100644 (file)
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
        .remove         = __devexit_p(envctrl_remove),
 };
 
-static int __init envctrl_init(void)
-{
-       return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
-       platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
 
-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");
index 73dd4e7afaaa0b84fb8a6fd9de3552cab37a8a19..826157f386943940367f663b20415e2c01fc571f 100644 (file)
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
        .remove         = __devexit_p(flash_remove),
 };
 
-static int __init flash_init(void)
-{
-       return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
-       platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
 
-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");
index ebce9639a26abba176a3aff226a89abc3d13f8bc..0b31658ccde5cd8be14d3b1f54a06670c1c375df 100644 (file)
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
 };
 
 
-static int __init uctrl_init(void)
-{
-       return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
-       platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
 
-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");
index 4aa76d6f11dfed870db05bb098f13cf4aa3d361a..705e13e470af18850ae4375533ae4f85495b592c 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
                unique_id++;
        }
 
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                              PCIE_LINK_STATE_CLKPM);
+
        error = pci_enable_device(pdev);
        if (error)
                goto out;
index dba72a4e6a1cd607288ce533c64a0313f132c2b4..1ad0b8225560f89fedae6a9e0f8318666df10fcb 100644 (file)
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
        spin_lock(&session->lock);
        task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
                                 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-       if (!task) {
+       if (!task || !task->sc) {
                spin_unlock(&session->lock);
                return -EINVAL;
        }
        sc = task->sc;
-       spin_unlock(&session->lock);
 
        if (!blk_rq_cpu_valid(sc->request))
                cpu = smp_processor_id();
        else
                cpu = sc->request->cpu;
 
+       spin_unlock(&session->lock);
+
        p = &per_cpu(bnx2i_percpu, cpu);
        spin_lock(&p->p_work_lock);
        if (unlikely(!p->iothread)) {
index 000294a9df8024e58a16b02e75e22b2d02d7fa0a..36739da8bc15699bac5586055de547e95697e022 100644 (file)
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
        csk->rss_qid = 0;
-       csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+       csk->l2t = t3_l2t_get(t3dev, dst, ndev);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
index ac7a9b1e3e237ade16f38436fecffa2e6ded69bb..5a4a3bfc60cf891d6902f35e8baca702bdc68fe0 100644 (file)
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
+       struct neighbour *n;
        unsigned int step;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
-       csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+       n = dst_get_neighbour_noref(csk->dst);
+       if (!n) {
+               pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+               goto rel_resource;
+       }
+       csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
index c10f74a566f2d58741e0c2012b15017e8ce07a5f..1d25a87aa47b2b561f1b948eef672e0b92b56ae7 100644 (file)
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        struct net_device *ndev;
        struct cxgbi_device *cdev;
        struct rtable *rt = NULL;
+       struct neighbour *n;
        struct flowi4 fl4;
        struct cxgbi_sock *csk = NULL;
        unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                goto err_out;
        }
        dst = &rt->dst;
-       ndev = dst_get_neighbour(dst)->dev;
+       n = dst_get_neighbour_noref(dst);
+       if (!n) {
+               err = -ENODEV;
+               goto rel_rt;
+       }
+       ndev = n->dev;
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
                mtu = ndev->mtu;
                pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
-                       dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+                       n->dev->name, ndev->name, mtu);
        }
 
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
index cefbe44bb84a1293db665510543ab8cccaec1622..8d67467dd9cec100f52b51803fbe943192421a58 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
                           unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+       .notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);
+       skb->priority = port->priority;
+
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
                skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        stats->InvalidCRCCount++;
        if (stats->InvalidCRCCount < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+       put_cpu();
        return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
  */
 static void fcoe_dev_setup(void)
 {
+       register_dcbevent_notifier(&dcb_notifier);
        register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
  */
 static void fcoe_dev_cleanup(void)
 {
+       unregister_dcbevent_notifier(&dcb_notifier);
        unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+       struct fcoe_interface *fcoe;
+       struct net_device *real_dev;
+
+       list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+               if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+                       real_dev = vlan_dev_real_dev(fcoe->netdev);
+               else
+                       real_dev = fcoe->netdev;
+
+               if (netdev == real_dev)
+                       return fcoe;
+       }
+       return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr)
+{
+       struct dcb_app_type *entry = ptr;
+       struct fcoe_interface *fcoe;
+       struct net_device *netdev;
+       struct fcoe_port *port;
+       int prio;
+
+       if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+               return NOTIFY_OK;
+
+       netdev = dev_get_by_index(&init_net, entry->ifindex);
+       if (!netdev)
+               return NOTIFY_OK;
+
+       fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+       dev_put(netdev);
+       if (!fcoe)
+               return NOTIFY_OK;
+
+       if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+               prio = ffs(entry->app.priority) - 1;
+       else
+               prio = entry->app.priority;
+
+       if (prio < 0)
+               return NOTIFY_OK;
+
+       if (entry->app.protocol == ETH_P_FIP ||
+           entry->app.protocol == ETH_P_FCOE)
+               fcoe->ctlr.priority = prio;
+
+       if (entry->app.protocol == ETH_P_FCOE) {
+               port = lport_priv(fcoe->ctlr.lp);
+               port->priority = prio;
+       }
+
+       return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1964,6 +2037,46 @@ static bool fcoe_match(struct net_device *netdev)
        return true;
 }
 
+/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+       int dcbx;
+       u8 fup, up;
+       struct net_device *netdev = fcoe->realdev;
+       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct dcb_app app = {
+                               .priority = 0,
+                               .protocol = ETH_P_FCOE
+                            };
+
+       /* setup DCB priority attributes. */
+       if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+               dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+               if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+                       app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+                       up = dcb_ieee_getapp_mask(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_ieee_getapp_mask(netdev, &app);
+               } else {
+                       app.selector = DCB_APP_IDTYPE_ETHTYPE;
+                       up = dcb_getapp(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_getapp(netdev, &app);
+               }
+
+               port->priority = ffs(up) ? ffs(up) - 1 : 0;
+               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+       }
+#endif
+}
+
 /**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this the "master" N_Port */
        fcoe->ctlr.lp = lport;
 
+       /* setup DCB priority attributes. */
+       fcoe_dcb_create(fcoe);
+
        /* add to lports list */
        fcoe_hostlist_add(lport);
 
index c74c4b8e71ef03c0c8a8111efe1152bb5586e26a..e7522dcc296eb8bb9c425da842f2a159d05862f2 100644 (file)
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 
        skb_put(skb, sizeof(*sol));
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
        }
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
        cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
 
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
index e76107b2ade3a2b7a2666daab0c7a0b683e53d74..865d452542be923ef7c02bc0f4eea816dfc898b3 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
                dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
                return -ENODEV;
        }
+
+       pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+                              PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
        err = pci_enable_device(h->pdev);
        if (err) {
                dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
index 8889b1babcacaebdc97d8333d5bdaebf2dd93b88..d570573b7963ec47179d15ec7ac613221f0ce4bc 100644 (file)
@@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
 
        if (ioc->is_driver_loading)
                return;
+
+       fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+       if (!fw_event)
+               return;
+
        fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
        fw_event->ioc = ioc;
        _scsih_fw_event_add(ioc, fw_event);
@@ -4330,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        /* insert into event log */
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-       event_reply = kzalloc(sz, GFP_KERNEL);
+       event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
index ac326c41e931dcba508c6357708be557986b8d88..6465dae5883a9dcb44ddde60b9c5f6f98fafd89b 100644 (file)
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
        scsi_qla_host_t *vha = shost_priv(shost);
        struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-       if (!base_vha->flags.online)
+       if (!base_vha->flags.online) {
                fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-       else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
-               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
-       else
+               return;
+       }
+
+       switch (atomic_read(&base_vha->loop_state)) {
+       case LOOP_UPDATE:
+               fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               break;
+       case LOOP_DOWN:
+               if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+                       fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               else
+                       fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_DEAD:
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_READY:
                fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+               break;
+       default:
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+               break;
+       }
 }
 
 static int
index 9df4787715c0828df3dadd48cb149550600919e5..f3cddd5800c307e219bb8a8c6a84db569117df03 100644 (file)
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0116       |               |
- * | Mailbox commands             |       0x1129       |               |
+ * | Mailbox commands             |       0x112b       |               |
  * | Device Discovery             |       0x2083       |               |
  * | Queue Command and IO tracing |       0x302e       |     0x3008     |
  * | DPC Thread                   |       0x401c       |               |
  * | Async Events                 |       0x5059       |               |
- * | Timer Routines               |       0x600d       |               |
+ * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
  * | User Space Interactions      |       0x709d       |               |
- * | Task Management              |       0x8041       |               |
+ * | Task Management              |       0x8041       | 0x800b         |
  * | AER/EEH                      |       0x900f       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb051       |               |
+ * | ISP82XX Specific             |       0xb052       |               |
  * | MultiQ                       |       0xc00b       |               |
  * | Misc                         |       0xd00b       |               |
  * ----------------------------------------------------------------------
index ce32d8135c9e36335f53722664eab61db46a0f18..c0c11afb685c450e473faaaf711b00d8ec0d7d82 100644 (file)
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
index f03e915f187729f6b208a0e89398b2a0b8642214..54ea68cec4c58c3cd0ee80c48bfaeb3f96e637d4 100644 (file)
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
                                    &ha->fw_xcb_count, NULL, NULL,
                                    &ha->max_npiv_vports, NULL);
 
-                               if (!fw_major_version && ql2xallocfwdump)
+                               if (!fw_major_version && ql2xallocfwdump
+                                   && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
                        }
                } else {
index dbec89622a0fa09d77ae6ad6c45e7df9237da3e0..a4b267e60a352b7cb72f620bd099ef19e12ddd13 100644 (file)
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
        cont_a64_entry_t *cont_pkt;
 
-       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           vha->hw->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
        int index;
        uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iterartion = 0;
        int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           ha->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
index 2516adf1aeeaa946372ac137da595d37466418c7..7b91b290ffd6bd23c1b137a7a3c73074f3f1580a 100644 (file)
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                    resid, scsi_bufflen(cp));
 
                                cp->result = DID_ERROR << 16 | lscsi_status;
-                               break;
+                               goto check_scsi_status;
                        }
 
                        if (!lscsi_status &&
index 3b3cec9f6ac295dab131050b8a92d47eadb06999..82a33533ed26c25dc87e90cfe1a35f95c33ccb44 100644 (file)
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                ql_log(ql_log_warn, base_vha, 0x1004,
                    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
-               rval = QLA_FUNCTION_FAILED;
-               goto premature_exit;
+               return QLA_FUNCTION_TIMEOUT;
        }
 
        /*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112a,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101c,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112b,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101e,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort.\n");
index 94bded5ddce4fe2f958dcdb8387c471eeaf790cf..03554934b0a58629848117649737a6fbabc15713 100644 (file)
@@ -3817,6 +3817,20 @@ exit:
        return rval;
 }
 
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->flags.mbox_busy) {
+               ha->flags.mbox_int = 1;
+               ha->flags.mbox_busy = 0;
+               ql_log(ql_log_warn, vha, 0x6010,
+                   "Doing premature completion of mbx command.\n");
+               if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+                       complete(&ha->mbx_intr_comp);
+       }
+}
+
 void qla82xx_watchdog(scsi_qla_host_t *vha)
 {
        uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                        qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
+                               ql_dbg(ql_dbg_timer, vha, 0x6011,
+                                   "disabling pause transmit on port 0 & 1.\n");
+                               qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                   CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla82xx_rd_32(ha,
                                    QLA82XX_PEG_HALT_STATUS1);
-                               ql_dbg(ql_dbg_timer, vha, 0x6005,
+                               ql_log(ql_log_info, vha, 0x6005,
                                    "dumping hw/fw registers:.\n "
                                    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
                                    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                            QLA82XX_CRB_PEG_NET_3 + 0x3c),
                                    qla82xx_rd_32(ha,
                                            QLA82XX_CRB_PEG_NET_4 + 0x3c));
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql_log(ql_log_warn, vha, 0xb052,
+                                           "Firmware aborted with "
+                                           "error code 0x00006700. Device is "
+                                           "being reset.\n");
                                if (halt_status & HALT_STATUS_UNRECOVERABLE) {
                                        set_bit(ISP_UNRECOVERABLE,
                                            &vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                }
                                qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       ql_log(ql_log_warn, vha, 0x6007,
-                                           "Due to FW hung, doing "
-                                           "premature completion of mbx "
-                                           "command.\n");
-                                       if (test_bit(MBX_INTR_WAIT,
-                                           &ha->mbx_cmd_flags))
-                                               complete(&ha->mbx_intr_comp);
-                               }
+                               ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+                               qla82xx_clear_pending_mbx(vha);
                        }
                }
        }
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        msleep(1000);
                        if (qla82xx_check_fw_alive(vha)) {
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       complete(&ha->mbx_intr_comp);
-                               }
+                               qla82xx_clear_pending_mbx(vha);
                                break;
                        }
                }
index 57820c199bc225858b836feb1d38b457dce14430..57a226be339aa2fe438c6a40a5674062552b006c 100644 (file)
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
 
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
        0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
 #endif
index fd14c7bfc62665f698d9950210dfa3b2e1cf1f42..f9e5b85e84d83e6d147eebca647e2e986358cc0c 100644 (file)
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
                "Set the Minidump driver capture mask level. "
                "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
-int ql2xmdenable;
+int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdenable,
                "Enable/disable MiniDump. "
-               "0 (Default) - MiniDump disabled. "
-               "1 - MiniDump enabled.");
+               "0 - MiniDump disabled. "
+               "1 (Default) - MiniDump enabled.");
 
 /*
  * SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
        qla25xx_delete_queues(vha);
        destroy_workqueue(ha->wq);
        ha->wq = NULL;
+       vha->req = ha->req_q_map[0];
 fail:
        ha->mqenable = 0;
        kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-/*
- * qla2x00_wait_for_loop_ready
- *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- *    to be in LOOP_READY state.
- * Input:
- *     ha - pointer to host adapter structure
- *
- * Note:
- *    Does context switching-Release SPIN_LOCK
- *    (if any) before calling this routine.
- *
- *
- * Return:
- *    Success (LOOP_READY) : 0
- *    Failed  (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
-       int      return_status = QLA_SUCCESS;
-       unsigned long loop_timeout ;
-       struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-       /* wait for 5 min at the max for loop to be ready */
-       loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
-       while ((!atomic_read(&base_vha->loop_down_timer) &&
-           atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
-           atomic_read(&base_vha->loop_state) != LOOP_READY) {
-               if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-               msleep(1000);
-               if (time_after_eq(jiffies, loop_timeout)) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-       }
-       return (return_status);
-}
-
 static void
 sp_get(struct srb *sp)
 {
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                    "Wait for hba online failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
-       err = 1;
-       if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
-               ql_log(ql_log_warn, vha, 0x800b,
-                   "Wait for loop ready failed for cmd=%p.\n", cmd);
-               goto eh_reset_failed;
-       }
        err = 2;
        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
                != QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
                goto eh_bus_reset_done;
        }
 
-       if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
-               if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
-                       ret = SUCCESS;
-       }
+       if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+               ret = SUCCESS;
+
        if (ret == FAILED)
                goto eh_bus_reset_done;
 
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
                goto eh_host_reset_lock;
 
-       /*
-        * Fixme-may be dpc thread is active and processing
-        * loop_resync,so wait a while for it to
-        * be completed and then issue big hammer.Otherwise
-        * it may cause I/O failure as big hammer marks the
-        * devices as lost kicking of the port_down_timer
-        * while dpc is stuck for the mailbox to complete.
-        */
-       qla2x00_wait_for_loop_ready(vha);
        if (vha != base_vha) {
                if (qla2x00_vp_abort_isp(vha))
                        goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                qla2x00_mark_all_devices_lost(vha, 0);
-               qla2x00_wait_for_loop_ready(vha);
        }
 
        if (ha->flags.enable_lip_reset) {
                ret = qla2x00_lip_reset(vha);
-               if (ret != QLA_SUCCESS) {
+               if (ret != QLA_SUCCESS)
                        ql_dbg(ql_dbg_taskm, vha, 0x802e,
                            "lip_reset failed (%d).\n", ret);
-               } else
-                       qla2x00_wait_for_loop_ready(vha);
        }
 
        /* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                /* For ISP82XX complete any pending mailbox cmd */
                if (IS_QLA82XX(ha)) {
                        ha->flags.isp82xx_fw_hung = 1;
-                       if (ha->flags.mbox_busy) {
-                               ha->flags.mbox_int = 1;
-                               ql_dbg(ql_dbg_aer, vha, 0x9001,
-                                   "Due to pci channel io frozen, doing premature "
-                                   "completion of mbx command.\n");
-                               complete(&ha->mbx_intr_comp);
-                       }
+                       ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+                       qla82xx_clear_pending_mbx(vha);
                }
                qla2x00_free_irqs(vha);
                pci_disable_device(pdev);
index 13b6357c1fa2ae297c811b7588b3fb1172f4dd3f..23f33a6d52d7278b5cf5d1d42a5bf44330aba183 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.07-k"
+#define QLA2XXX_VERSION      "8.03.07.12-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index ace637bf254e1ca2b8c9d8be7490f41027945044..fd5edc6e166dec140854e737912fec75a4eebf20 100644 (file)
 #define ISCSI_ALIAS_SIZE               32      /* ISCSI Alias name size */
 #define ISCSI_NAME_SIZE                        0xE0    /* ISCSI Name size */
 
-#define QL4_SESS_RECOVERY_TMO          30      /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
 #define LSDW(x) ((u32)((u64)(x)))
 #define ISNS_DEREG_TOV                 5
 #define HBA_ONLINE_TOV                 30
 #define DISABLE_ACB_TOV                        30
+#define IP_CONFIG_TOV                  30
+#define LOGIN_TOV                      12
 
 #define MAX_RESET_HA_RETRIES           2
 
@@ -240,6 +242,45 @@ struct ddb_entry {
 
        uint16_t fw_ddb_index;  /* DDB firmware index */
        uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+       uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+       struct dev_db_entry fw_ddb_entry;
+       int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+       int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                         struct ddb_entry *ddb_entry, uint32_t state);
+
+       /* Driver Re-login  */
+       unsigned long flags;              /* DDB Flags */
+       uint16_t default_relogin_timeout; /*  Max time to wait for
+                                          *  relogin to complete */
+       atomic_t retry_relogin_timer;     /* Min Time between relogins
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+       atomic_t relogin_retry_count;     /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+       struct list_head list;
+       uint16_t fw_ddb_idx;
+       struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+       int port;
+       int tpgt;
+       char ip_addr[DDB_IPADDR_LEN];
+       char iscsi_name[ISCSI_NAME_SIZE];
+       uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
 };
 
 /*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
 #define AF_FW_RECOVERY                 19 /* 0x00080000 */
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
        uint16_t bootload_minor;
        uint16_t bootload_patch;
        uint16_t bootload_build;
+       uint16_t def_timeout; /* Default login timeout */
 
        uint32_t flash_state;
 #define        QLFLASH_WAITING         0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
        uint16_t iscsi_pci_func_cnt;
        uint8_t model_name[16];
        struct completion disable_acb_comp;
+       struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+       uint16_t pri_ddb_idx;
+       uint16_t sec_ddb_idx;
+       int is_reset;
 };
 
 struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
 #define PRESERVE_DDB_LIST      0
 #define REBUILD_DDB_LIST       1
 
index cbd5a20dbbd150c7a1fbe4840b7a547a1097dc2a..4ac07f882521307ef0f018806408e4dded971499 100644 (file)
@@ -12,6 +12,7 @@
 #define MAX_PRST_DEV_DB_ENTRIES                64
 #define MIN_DISC_DEV_DB_ENTRY          MAX_PRST_DEV_DB_ENTRIES
 #define MAX_DEV_DB_ENTRIES             512
+#define MAX_DEV_DB_ENTRIES_40XX                256
 
 /*************************************************************************
  *
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
        uint8_t res14[140];     /* 274-2FF */
 };
 
+#define IP_ADDR_COUNT  4 /* Total 4 IP address supported in one interface
+                          * One IPv4, one IPv6 link local and 2 IPv6
+                          */
+
+#define IP_STATE_MASK  0x0F000000
+#define IP_STATE_SHIFT 24
+
 struct init_fw_ctrl_blk {
        struct addr_ctrl_blk pri;
 /*     struct addr_ctrl_blk sec;*/
index 160db9d5ea2101e8ccb2ef4772c81429747a4b0d..d0dd4b33020643dd5bad1cf7d3c30a1d5d02575c 100644 (file)
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                          uint32_t *mbx_sts);
 int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
 int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
 int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
                          uint16_t stats_size, dma_addr_t stats_dma);
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry);
 int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
                            struct dev_db_entry *fw_ddb_entry,
                            dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
 int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
                                     uint32_t region, uint32_t field0,
                                     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
index 3075fbaef5533d6574cedce72f9c24b722696afa..1bdfa8120ac888c65c304c28dc3f3aba806ea403 100644 (file)
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
  * be freed so that when login happens from user space there are free DDB
  * indices available.
  **/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
 {
        int max_ddbs;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
 
-       max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                     MAX_DEV_DB_ENTRIES;
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
                ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
                                              &next_idx, &state, &conn_err,
                                                NULL, NULL);
-               if (ret == QLA_ERROR)
+               if (ret == QLA_ERROR) {
+                       next_idx++;
                        continue;
+               }
                if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
                    state == DDB_DS_SESSION_FAILED) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
        }
 }
 
-
 /**
  * qla4xxx_initialize_adapter - initiailizes hba
  * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
  * This routine parforms all of the steps necessary to initialize the adapter.
  *
  **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 {
        int status = QLA_ERROR;
 
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
        if (status == QLA_ERROR)
                goto exit_init_hba;
 
-       qla4xxx_free_ddb_index(ha);
+       if (is_reset == RESET_ADAPTER)
+               qla4xxx_build_ddb_list(ha, is_reset);
 
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
        return status;
 }
 
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
-               uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state)
 {
-       struct ddb_entry * ddb_entry;
        uint32_t old_fw_ddb_device_state;
        int status = QLA_ERROR;
 
-       /* check for out of range index */
-       if (fw_ddb_index >= MAX_DDB_ENTRIES)
-               goto exit_ddb_event;
-
-       /* Get the corresponging ddb entry */
-       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
-       /* Device does not currently exist in our database. */
-       if (ddb_entry == NULL) {
-               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
-                          __func__, fw_ddb_index);
-
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
-                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
-               goto exit_ddb_event;
-       }
-
        old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                                __func__));
                break;
        }
+       return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+       /*
+        * This triggers a relogin.  After the relogin_timer
+        * expires, the relogin gets scheduled.  We must wait a
+        * minimum amount of time since receiving an 0x8014 AEN
+        * with failed device_state or a logout response before
+        * we can issue another relogin.
+        *
+        * Firmware pads this timeout: (time2wait +1).
+        * Driver retry to login should be longer than F/W.
+        * Otherwise F/W will fail
+        * set_ddb() mbx cmd with 0x4005 since it still
+        * counting down its time2wait.
+        */
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->retry_relogin_timer,
+                  ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state)
+{
+       uint32_t old_fw_ddb_device_state;
+       int status = QLA_ERROR;
+
+       old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: DDB - old state = 0x%x, new state = 0x%x for "
+                         "index [%d]\n", __func__,
+                         ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+       ddb_entry->fw_ddb_device_state = state;
+
+       switch (old_fw_ddb_device_state) {
+       case DDB_DS_LOGIN_IN_PROCESS:
+       case DDB_DS_NO_CONNECTION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_FAILED:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       default:
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+                                 __func__));
+               break;
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Decive Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+                               uint32_t fw_ddb_index,
+                               uint32_t state, uint32_t conn_err)
+{
+       struct ddb_entry *ddb_entry;
+       int status = QLA_ERROR;
+
+       /* check for out of range index */
+       if (fw_ddb_index >= MAX_DDB_ENTRIES)
+               goto exit_ddb_event;
+
+       /* Get the corresponging ddb entry */
+       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+       /* Device does not currently exist in our database. */
+       if (ddb_entry == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+                          __func__, fw_ddb_index);
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+               goto exit_ddb_event;
+       }
+
+       ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
 
 exit_ddb_event:
        return status;
 }
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logins to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       uint32_t mbx_sts = 0;
+       int ret;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha =  ddb_entry->ha;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags))
+               return;
+
+       if (ddb_entry->ddb_type != FLASH_DDB) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Skipping login to non FLASH DB"));
+               goto exit_login;
+       }
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_login;
+       }
+
+       if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+               ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+               if (ret == QLA_ERROR)
+                       goto exit_login;
+
+               ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+               ha->tot_ddbs++;
+       }
+
+       memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+       ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+       ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+                                   fw_ddb_dma, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+               goto exit_login;
+       }
+
+       ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+       ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+       if (ret == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+                          sess->targetname);
+               goto exit_login;
+       }
+
+exit_login:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
index 4c2b84870392e16f1ed03428aa7ed9c004c9cbd9..c2593782fbbef8c203148b1661c92a3e1dbe35f6 100644 (file)
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                return status;
        }
 
+       if (is_qla40XX(ha)) {
+               if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+                                         "prematurely completing mbx cmd as "
+                                         "adapter removal detected\n",
+                                         ha->host_no, __func__));
+                       return status;
+               }
+       }
+
        if (is_qla8022(ha)) {
                if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
        memcpy(ha->name_string, init_fw_cb->iscsi_name,
                min(sizeof(ha->name_string),
                sizeof(init_fw_cb->iscsi_name)));
+       ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
        /*memcpy(ha->alias, init_fw_cb->Alias,
               min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
index 30f31b127f33750dd384770cb5b1ba1b3e33807d..4169c8baa112a41266b4a05335290f7b1fdf90fd 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+               "Set to disable exporting boot targets to sysfs\n"
+               " 0 - Export boot targets\n"
+               " 1 - Do not export boot targets (Default)");
+
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 30 sec.");
+               " Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
        qla_ep = ep->dd_data;
        ha = to_qla_host(qla_ep->host);
 
-       if (adapter_up(ha))
+       if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
                ret = 1;
 
        return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
 }
 
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+       uint32_t mbx_sts = 0;
+       uint16_t tmp_ddb_index;
+       int ret;
+
+get_ddb_index:
+       tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+       if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free DDB index not available\n"));
+               ret = QLA_ERROR;
+               goto exit_get_ddb_index;
+       }
+
+       if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+               goto get_ddb_index;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Found a free DDB index at %d\n", tmp_ddb_index));
+       ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "DDB index = %d not available trying next\n",
+                                  tmp_ddb_index);
+                       goto get_ddb_index;
+               }
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free FW DDB not available\n"));
+       }
+
+       *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+       return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+                                  struct ddb_entry *ddb_entry,
+                                  char *existing_ipaddr,
+                                  char *user_ipaddr)
+{
+       uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+       char formatted_ipaddr[DDB_IPADDR_LEN];
+       int status = QLA_SUCCESS, ret = 0;
+
+       if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+               ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+       } else {
+               ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+       }
+
+       if (strcmp(existing_ipaddr, formatted_ipaddr))
+               status = QLA_ERROR;
+
+out_match:
+       return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+                                     struct iscsi_cls_conn *cls_conn)
+{
+       int idx = 0, max_ddbs, rval;
+       struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+       struct iscsi_session *sess, *existing_sess;
+       struct iscsi_conn *conn, *existing_conn;
+       struct ddb_entry *ddb_entry;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       if (sess->targetname == NULL ||
+           conn->persistent_address == NULL ||
+           conn->persistent_port == 0)
+               return QLA_ERROR;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               if (ddb_entry->ddb_type != FLASH_DDB)
+                       continue;
+
+               existing_sess = ddb_entry->sess->dd_data;
+               existing_conn = ddb_entry->conn->dd_data;
+
+               if (existing_sess->targetname == NULL ||
+                   existing_conn->persistent_address == NULL ||
+                   existing_conn->persistent_port == 0)
+                       continue;
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IQN = %s User IQN = %s\n",
+                                 existing_sess->targetname,
+                                 sess->targetname));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IP = %s User IP = %s\n",
+                                 existing_conn->persistent_address,
+                                 conn->persistent_address));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Port = %d User Port = %d\n",
+                                 existing_conn->persistent_port,
+                                 conn->persistent_port));
+
+               if (strcmp(existing_sess->targetname, sess->targetname))
+                       continue;
+               rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+                                       existing_conn->persistent_address,
+                                       conn->persistent_address);
+               if (rval == QLA_ERROR)
+                       continue;
+               if (existing_conn->persistent_port != conn->persistent_port)
+                       continue;
+               break;
+       }
+
+       if (idx == max_ddbs)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match found in fwdb sessions\n"));
+       return QLA_SUCCESS;
+}
+
 static struct iscsi_cls_session *
 qla4xxx_session_create(struct iscsi_endpoint *ep,
                        uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        struct scsi_qla_host *ha;
        struct qla_endpoint *qla_ep;
        struct ddb_entry *ddb_entry;
-       uint32_t ddb_index;
-       uint32_t mbx_sts = 0;
+       uint16_t ddb_index;
        struct iscsi_session *sess;
        struct sockaddr *dst_addr;
        int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        ha = to_qla_host(qla_ep->host);
 
-get_ddb_index:
-       ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
-       if (ddb_index >= MAX_DDB_ENTRIES) {
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free DDB index not available\n"));
-               return NULL;
-       }
-
-       if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
-               goto get_ddb_index;
-
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Found a free DDB index at %d\n", ddb_index));
-       ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
-       if (ret == QLA_ERROR) {
-               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
-                       ql4_printk(KERN_INFO, ha,
-                                  "DDB index = %d not available trying next\n",
-                                  ddb_index);
-                       goto get_ddb_index;
-               }
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free FW DDB not available\n"));
+       ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+       if (ret == QLA_ERROR)
                return NULL;
-       }
 
        cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                       cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
        ddb_entry->ha = ha;
        ddb_entry->sess = cls_sess;
+       ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+       ddb_entry->ddb_change = qla4xxx_ddb_change;
        cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
        ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
        ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                    conn_idx);
+       if (!cls_conn)
+               return NULL;
+
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       struct dev_db_entry *fw_ddb_entry;
+       struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t mbx_sts = 0;
        int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       /* Check if we have  matching FW DDB, if yes then do not
+        * login to this target. This could cause target to logout previous
+        * connection
+        */
+       ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+       if (ret == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "Session already exist in FW.\n");
+               ret = -EEXIST;
+               goto exit_conn_start;
+       }
+
        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto exit_conn_start;
        }
 
        ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
                if (mbx_sts)
                        if (ddb_entry->fw_ddb_device_state ==
                                                DDB_DS_SESSION_ACTIVE) {
-                               iscsi_conn_start(ddb_entry->conn);
-                               iscsi_conn_login_event(ddb_entry->conn,
-                                               ISCSI_CONN_STATE_LOGGED_IN);
+                               ddb_entry->unblock_sess(ddb_entry->sess);
                                goto exit_set_param;
                        }
 
@@ -1167,8 +1311,9 @@ exit_set_param:
        ret = 0;
 
 exit_conn_start:
-       dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-                         fw_ddb_entry, fw_ddb_entry_dma);
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
        return ret;
 }
 
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
        return -ENOSYS;
 }
 
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    struct iscsi_cls_session *cls_sess,
+                                    struct iscsi_cls_conn *cls_conn)
+{
+       int buflen = 0;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+       char ip_addr[DDB_IPADDR_LEN];
+       uint16_t options = 0;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       conn->max_recv_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+       conn->max_xmit_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+       sess->initial_r2t_en =
+                           (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+       sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->first_burst = BYTE_UNITS *
+                              le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+       sess->max_burst = BYTE_UNITS *
+                                le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+       sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+       sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+       conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+       sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+                       (char *)fw_ddb_entry->iscsi_name, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+                       (char *)ha->name_string, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+                       (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       uint32_t ddb_state;
+       dma_addr_t fw_ddb_entry_dma;
+       struct dev_db_entry *fw_ddb_entry;
+
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto exit_session_conn_fwddb_param;
+       }
+
+       if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+                                   fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+                                   NULL, NULL, NULL) == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+                                 "get_ddb_entry for fw_ddb_index %d\n",
+                                 ha->host_no, __func__,
+                                 ddb_entry->fw_ddb_index));
+               goto exit_session_conn_fwddb_param;
+       }
+
+       cls_sess = ddb_entry->sess;
+
+       cls_conn = ddb_entry->conn;
+
+       /* Update params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry)
 {
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return;
+               goto exit_session_conn_param;
        }
 
        if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                  "get_ddb_entry for fw_ddb_index %d\n",
                                  ha->host_no, __func__,
                                  ddb_entry->fw_ddb_index));
-               return;
+               goto exit_session_conn_param;
        }
 
        cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        cls_conn = ddb_entry->conn;
        conn = cls_conn->dd_data;
 
+       /* Update timers after login */
+       ddb_entry->default_relogin_timeout =
+                               le16_to_cpu(fw_ddb_entry->def_timeout);
+       ddb_entry->default_time2wait =
+                               le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
        /* Update params */
        conn->max_recv_dlength = BYTE_UNITS *
                          le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        memcpy(sess->initiatorname, ha->name_string,
               min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 /*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                vfree(ha->chap_list);
        ha->chap_list = NULL;
 
+       if (ha->fw_ddb_dma_pool)
+               dma_pool_destroy(ha->fw_ddb_dma_pool);
+
        /* release io space registers  */
        if (is_qla8022(ha)) {
                if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
                goto mem_alloc_error_exit;
        }
 
+       ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+                                             DDB_DMA_BLOCK_SIZE, 8, 0);
+
+       if (ha->fw_ddb_dma_pool == NULL) {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: fw_ddb_dma_pool allocation failed..\n",
+                          __func__);
+               goto mem_alloc_error_exit;
+       }
+
        return QLA_SUCCESS;
 
 mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+                   INVALID_ENTRY) {
+                       if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+                                       0) {
+                               atomic_set(&ddb_entry->retry_relogin_timer,
+                                          INVALID_ENTRY);
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                      "%s: index [%d] login device\n",
+                                       __func__, ddb_entry->fw_ddb_index));
+                       } else
+                               atomic_dec(&ddb_entry->retry_relogin_timer);
+               }
+       }
+
+       /* Wait for relogin to timeout */
+       if (atomic_read(&ddb_entry->relogin_timer) &&
+           (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+               /*
+                * If the relogin times out and the device is
+                * still NOT ONLINE then try and relogin again.
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+                       atomic_inc(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out-retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+                               atomic_read(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+                                  ddb_entry->default_time2wait + 4);
+               }
+       }
+}
+
 /**
  * qla4xxx_timer - checks every second for work to do.
  * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
        int start_dpc = 0;
        uint16_t w;
 
+       iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
        /* If we are in the middle of AER/EEH processing
         * skip any processing and reschedule the timer
         */
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+       if (ddb_entry->ddb_type == FLASH_DDB)
+               iscsi_block_session(ddb_entry->sess);
+       else
+               iscsi_session_failure(cls_session->dd_data,
+                                     ISCSI_ERR_CONN_FAILED);
 }
 
 /**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
 
                /* NOTE: AF_ONLINE flag set upon successful completion of
                 *       qla4xxx_initialize_adapter */
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
        }
 
        /* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
                        iscsi_unblock_session(ddb_entry->sess);
                } else {
                        /* Trigger relogin */
-                       iscsi_session_failure(cls_session->dd_data,
-                                             ISCSI_ERR_CONN_FAILED);
+                       if (ddb_entry->ddb_type == FLASH_DDB) {
+                               if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                                       qla4xxx_arm_relogin_timer(ddb_entry);
+                       } else
+                               iscsi_session_failure(cls_session->dd_data,
+                                                     ISCSI_ERR_CONN_FAILED);
                }
        }
 }
 
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+
+       iscsi_unblock_session(ddb_entry->sess);
+
+       /* Start scan target */
+       if (test_bit(AF_ONLINE, &ha->flags)) {
+               ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                          " start scan\n", ha->host_no, __func__,
+                          ddb_entry->fw_ddb_index);
+               scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+       }
+       return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock user space session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+       iscsi_conn_start(ddb_entry->conn);
+       iscsi_conn_login_event(ddb_entry->conn,
+                              ISCSI_CONN_STATE_LOGGED_IN);
+
+       return QLA_SUCCESS;
+}
+
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 {
        iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
 }
 
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       uint16_t relogin_timer;
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       relogin_timer = max(ddb_entry->default_relogin_timeout,
+                           (uint16_t)RELOGIN_TOV);
+       atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+                         ddb_entry->fw_ddb_index, relogin_timer));
+
+       qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "relogin issued\n"));
+               qla4xxx_relogin_flash_ddb(cls_sess);
+       }
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
        if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
        if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
                qla4xxx_get_dhcp_ip_address(ha);
 
+       /* ---- relogin device? --- */
+       if (adapter_up(ha) &&
+           test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+               iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+       }
+
        /* ---- link change? --- */
        if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
                if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
                         * fatal error recovery.  Therefore, the driver must
                         * manually relogin to devices when recovering from
                         * connection failures, logouts, expired KATO, etc. */
-
-                       qla4xxx_relogin_all_devices(ha);
+                       if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+                               qla4xxx_build_ddb_list(ha, ha->is_reset);
+                               iscsi_host_for_each_session(ha->host,
+                                               qla4xxx_login_flash_ddb);
+                       } else
+                               qla4xxx_relogin_all_devices(ha);
                }
        }
 }
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
                          " target ID %d\n", __func__, ddb_index[0],
                          ddb_index[1]));
 
+       ha->pri_ddb_idx = ddb_index[0];
+       ha->sec_ddb_idx = ddb_index[1];
+
 exit_boot_info_free:
        dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
                return ret;
        }
 
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+
        if (ddb_index[0] == 0xffff)
                goto sec_target;
 
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        struct iscsi_boot_kobj *boot_kobj;
 
        if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
-               return 0;
+               return QLA_ERROR;
+
+       if (ql4xdisablesysfsboot) {
+               ql4_printk(KERN_INFO, ha,
+                          "%s: syfsboot disabled - driver will trigger login"
+                          "and publish session for discovery .\n", __func__);
+               return QLA_SUCCESS;
+       }
+
 
        ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
        if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        if (!boot_kobj)
                goto put_host;
 
-       return 0;
+       return QLA_SUCCESS;
 
 put_host:
        scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
 exit_chap_list:
        dma_free_coherent(&ha->pdev->dev, chap_size,
                        chap_flash_data, chap_dma);
-       return;
 }
 
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+                                 struct ql4_tuple_ddb *tddb)
+{
+       struct scsi_qla_host *ha;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+
+       DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+       ha = ddb_entry->ha;
+       cls_sess = ddb_entry->sess;
+       sess = cls_sess->dd_data;
+       cls_conn = ddb_entry->conn;
+       conn = cls_conn->dd_data;
+
+       tddb->tpgt = sess->tpgt;
+       tddb->port = conn->persistent_port;
+       strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+       strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+                                     struct ql4_tuple_ddb *tddb)
+{
+       uint16_t options = 0;
+
+       tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+       memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+              min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+                                    struct ql4_tuple_ddb *old_tddb,
+                                    struct ql4_tuple_ddb *new_tddb)
+{
+       if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+               return QLA_ERROR;
+
+       if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+               return QLA_ERROR;
+
+       if (old_tddb->port != new_tddb->port)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+                         old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+                         old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+                         new_tddb->ip_addr, new_tddb->iscsi_name));
+
+       return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry)
+{
+       struct ddb_entry *ddb_entry;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int idx;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+                                      struct list_head *list_nt,
+                                      struct dev_db_entry *fw_ddb_entry)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+       /* Free up the normaltargets list */
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               list_del_init(&nt_ddb_idx->list);
+               vfree(nt_ddb_idx);
+       }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry)
+{
+       struct iscsi_endpoint *ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct sockaddr *dst_addr;
+       char *ip;
+
+       /* TODO: need to destroy on unload iscsi_endpoint*/
+       dst_addr = vmalloc(sizeof(*dst_addr));
+       if (!dst_addr)
+               return NULL;
+
+       if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+               dst_addr->sa_family = AF_INET6;
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               ip = (char *)&addr6->sin6_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+               addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+       } else {
+               dst_addr->sa_family = AF_INET;
+               addr = (struct sockaddr_in *)dst_addr;
+               ip = (char *)&addr->sin_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+               addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+       }
+
+       ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+       vfree(dst_addr);
+       return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+       if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+               return QLA_ERROR;
+       return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+                                         struct ddb_entry *ddb_entry)
+{
+       ddb_entry->ddb_type = FLASH_DDB;
+       ddb_entry->fw_ddb_index = INVALID_ENTRY;
+       ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+       ddb_entry->ha = ha;
+       ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+       ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+       ddb_entry->default_relogin_timeout =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_time2wait =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+       uint32_t idx = 0;
+       uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+       uint32_t sts[MBOX_REG_COUNT];
+       uint32_t ip_state;
+       unsigned long wtime;
+       int ret;
+
+       wtime = jiffies + (HZ * IP_CONFIG_TOV);
+       do {
+               for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+                       if (ip_idx[idx] == -1)
+                               continue;
+
+                       ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+                       if (ret == QLA_ERROR) {
+                               ip_idx[idx] = -1;
+                               continue;
+                       }
+
+                       ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Waiting for IP state for idx = %d, state = 0x%x\n",
+                                         ip_idx[idx], ip_state));
+                       if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+                           ip_state == IP_ADDRSTATE_INVALID ||
+                           ip_state == IP_ADDRSTATE_PREFERRED ||
+                           ip_state == IP_ADDRSTATE_DEPRICATED ||
+                           ip_state == IP_ADDRSTATE_DISABLING)
+                               ip_idx[idx] = -1;
+
+               }
+
+               /* Break if all IP states checked */
+               if ((ip_idx[0] == -1) &&
+                   (ip_idx[1] == -1) &&
+                   (ip_idx[2] == -1) &&
+                   (ip_idx[3] == -1))
+                       break;
+               schedule_timeout_uninterruptible(HZ);
+       } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       int max_ddbs;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id;
+       struct dev_db_entry *fw_ddb_entry;
+       struct ddb_entry *ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32, tmo = 0;
+       uint32_t initial_cmdsn = 0;
+       struct list_head list_st, list_nt; /* List of sendtargets */
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       int fw_idx_size;
+       unsigned long wtime;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_ddb_list;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_st;
+
+               /* Check if ST, add to the list_st */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+                       goto continue_next_st;
+
+               st_ddb_idx = vzalloc(fw_idx_size);
+               if (!st_ddb_idx)
+                       break;
+
+               st_ddb_idx->fw_ddb_idx = idx;
+
+               list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+               if (next_idx == 0)
+                       break;
+       }
+
+       /* Before issuing conn open mbox, ensure all IPs states are configured
+        * Note, conn open fails if IPs are not configured
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait to ensure all sendtargets are done for min 12 sec wait */
+       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+                                        list) {
+                       ret = qla4xxx_get_fwddb_entry(ha,
+                                                     st_ddb_idx->fw_ddb_idx,
+                                                     NULL, 0, NULL, &next_idx,
+                                                     &state, &conn_err, NULL,
+                                                     NULL);
+                       if (ret == QLA_ERROR)
+                               continue;
+
+                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                           state == DDB_DS_SESSION_FAILED) {
+                               list_del_init(&st_ddb_idx->list);
+                               vfree(st_ddb_idx);
+                       }
+               }
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               list_del_init(&st_ddb_idx->list);
+               vfree(st_ddb_idx);
+       }
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_nt;
+
+               /* Check if NT, then add to list it */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+                       goto continue_next_nt;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Adding  DDB to session = 0x%x\n",
+                                         idx));
+                       if (is_reset == INIT_ADAPTER) {
+                               nt_ddb_idx = vmalloc(fw_idx_size);
+                               if (!nt_ddb_idx)
+                                       break;
+
+                               nt_ddb_idx->fw_ddb_idx = idx;
+
+                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+                                      sizeof(struct dev_db_entry));
+
+                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+                                               fw_ddb_entry) == QLA_SUCCESS) {
+                                       vfree(nt_ddb_idx);
+                                       goto continue_next_nt;
+                               }
+                               list_add_tail(&nt_ddb_idx->list, &list_nt);
+                       } else if (is_reset == RESET_ADAPTER) {
+                               if (qla4xxx_is_session_exists(ha,
+                                                  fw_ddb_entry) == QLA_SUCCESS)
+                                       goto continue_next_nt;
+                       }
+
+                       /* Create session object, with INVALID_ENTRY,
+                        * the targer_id would get set when we issue the login
+                        */
+                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+                                               ha->host, cmds_max,
+                                               sizeof(struct ddb_entry),
+                                               sizeof(struct ql4_task_data),
+                                               initial_cmdsn, INVALID_ENTRY);
+                       if (!cls_sess)
+                               goto exit_ddb_list;
+
+                       /*
+                        * iscsi_session_setup increments the driver reference
+                        * count which wouldn't let the driver to be unloaded.
+                        * so calling module_put function to decrement the
+                        * reference count.
+                        **/
+                       module_put(qla4xxx_iscsi_transport.owner);
+                       sess = cls_sess->dd_data;
+                       ddb_entry = sess->dd_data;
+                       ddb_entry->sess = cls_sess;
+
+                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                              sizeof(struct dev_db_entry));
+
+                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+                       cls_conn = iscsi_conn_setup(cls_sess,
+                                                   sizeof(struct qla_conn),
+                                                   conn_id);
+                       if (!cls_conn)
+                               goto exit_ddb_list;
+
+                       ddb_entry->conn = cls_conn;
+
+                       /* Setup ep, for displaying attributes in sysfs */
+                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+                       if (ep) {
+                               ep->conn = cls_conn;
+                               cls_conn->ep = ep;
+                       } else {
+                               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                                 "Unable to get ep\n"));
+                       }
+
+                       /* Update sess/conn params */
+                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+                                                cls_conn);
+
+                       if (is_reset == RESET_ADAPTER) {
+                               iscsi_block_session(cls_sess);
+                               /* Use the relogin path to discover new devices
+                                *  by short-circuting the logic of setting
+                                *  timer to relogin - instead set the flags
+                                *  to initiate login right away.
+                                */
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       }
+               }
+continue_next_nt:
+               if (next_idx == 0)
+                       break;
+       }
+exit_ddb_list:
+       qla4xxx_free_nt_list(&list_nt);
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+       qla4xxx_free_ddb_index(ha);
+}
+
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
         * firmware
         * NOTE: interrupts enabled upon successful completion
         */
-       status = qla4xxx_initialize_adapter(ha);
+       status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {
 
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
                        continue;
 
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        }
 
        if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
               ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
               ha->patch_number, ha->build_number);
 
-       qla4xxx_create_chap_list(ha);
-
        if (qla4xxx_setup_boot_info(ha))
                ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
                           __func__);
 
+               /* Perform the build ddb list and login to each */
+       qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+       iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+       qla4xxx_create_chap_list(ha);
+
        qla4xxx_create_ifaces(ha);
        return 0;
 
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
        }
 }
 
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+       struct ddb_entry *ddb_entry;
+       int options;
+       int idx;
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if ((ddb_entry != NULL) &&
+                   (ddb_entry->ddb_type == FLASH_DDB)) {
+
+                       options = LOGOUT_OPTION_CLOSE_SESSION;
+                       if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+                           == QLA_ERROR)
+                               ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+                                          __func__);
+
+                       qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+                       /*
+                        * we have decremented the reference count of the driver
+                        * when we setup the session to have the driver unload
+                        * to be seamless without actually destroying the
+                        * session
+                        **/
+                       try_module_get(qla4xxx_iscsi_transport.owner);
+                       iscsi_destroy_endpoint(ddb_entry->conn->ep);
+                       qla4xxx_free_ddb(ha, ddb_entry);
+                       iscsi_session_teardown(ddb_entry->sess);
+               }
+       }
+}
 /**
  * qla4xxx_remove_adapter - calback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
        /* destroy iface from sysfs */
        qla4xxx_destroy_ifaces(ha);
 
-       if (ha->boot_kset)
+       if ((!ql4xdisablesysfsboot) && ha->boot_kset)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
+       qla4xxx_destroy_fw_ddb_session(ha);
+
        scsi_remove_host(ha->host);
 
        qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 
                qla4_8xxx_idc_unlock(ha);
                clear_bit(AF_FW_RECOVERY, &ha->flags);
-               rval = qla4xxx_initialize_adapter(ha);
+               rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                qla4_8xxx_idc_lock(ha);
 
                if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
                    QLA82XX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
-                       rval = qla4xxx_initialize_adapter(ha);
+                       rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS) {
                                ret = qla4xxx_request_irqs(ha);
                                if (ret) {
index c15347d3f532099ef70127371a91caef2d90c3ee..5254e57968f5cf64a24c04731fe9ef2e240c4068 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
index 06bc26554a67359a8656ef976b6d483e71bfd0fb..f85cfa6c47b5212982bcbfcd9065936c27360f3d 100644 (file)
@@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
        blk_start_request(req);
 
+       scmd_printk(KERN_INFO, cmd, "killing request\n");
+
        sdev = cmd->device;
        starget = scsi_target(sdev);
        shost = sdev->host;
@@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q)
        struct request *req;
 
        if (!sdev) {
-               printk("scsi: killing requests for dead queue\n");
                while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
index 72273a0e56662569931f812b6f628a3e3a4748c8..b3c6d957fbd8aa587dcd039547eaa82bd67b6476 100644 (file)
@@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        return sdev;
 
 out_device_destroy:
-       scsi_device_set_state(sdev, SDEV_DEL);
-       transport_destroy_device(&sdev->sdev_gendev);
-       put_device(&sdev->sdev_dev);
-       scsi_free_queue(sdev->request_queue);
-       put_device(&sdev->sdev_gendev);
+       __scsi_remove_device(sdev);
 out:
        if (display_failure_msg)
                printk(ALLOC_FAILURE_MSG, __func__);
index a1fd73df5416129c62d8889f87c1d003b0b6b47c..8ba4510a95195392da12a65ce641ea5f164f3c20 100644 (file)
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       tristate "Freescale SPI controller"
+       bool "Freescale SPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
          MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-       tristate "Freescale eSPI controller"
+       bool "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
index 024b48aed5ca6bba6f32857965e8904fe012ca1b..acc88b4d28693da4d59d315b350f0e717837e607 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
index e093d3ec41ba26f1da89a100e3f8774511e65a21..0094c645ff0d4cc4562412ef020e6ba838966779 100644 (file)
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
        spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
        int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
        return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
        u16 *res_flags)
 {
index e763254741c296169b6d662d5f398c1a90170cc2..182e9c873822a64b6e9e25976470044eae8bc338 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
                goto err_clk;
        }
 
-       mfp_set_groupg(&pdev->dev);
+       mfp_set_groupg(&pdev->dev, NULL);
        nuc900_init_spi(hw);
 
        err = spi_bitbang_start(&hw->bitbang);
index f103e470cb6362e248a264576e457e03f0ab5409..5559b229919870fad59680b722e1390585ba61ca 100644 (file)
@@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
                goto  err_clk_prep;
        }
 
+       status = clk_enable(pl022->clk);
+       if (status) {
+               dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
+               goto err_no_clk_en;
+       }
+
        /* Disable SSP */
        writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
               SSP_CR1(pl022->virtbase));
@@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 
        free_irq(adev->irq[0], pl022);
  err_no_irq:
+       clk_disable(pl022->clk);
+ err_no_clk_en:
        clk_unprepare(pl022->clk);
  err_clk_prep:
        clk_put(pl022->clk);
index 21d8c1c16cd891e42bf1621d7ff1ba8f0dc5e580..5e78c77d5a08277611c183c92dae273362caed58 100644 (file)
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
        }
 
        insns =
-           kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+           kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
        if (!insns) {
                DPRINTK("kmalloc failed\n");
                ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
        return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+       struct comedi_async *async;
+       struct comedi_device *dev;
+
+       async = area->vm_private_data;
+       dev = async->subdevice->device;
+
+       mutex_lock(&dev->mutex);
+       async->mmap_count++;
+       mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
        struct comedi_async *async;
        struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-       .close = comedi_unmap,
+       .open = comedi_vm_open,
+       .close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_async *async = NULL;
        unsigned long start = vma->vm_start;
        unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
        int i;
        int retval;
        struct comedi_subdevice *s;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+
+       dev_file_info = comedi_get_device_file_info(minor);
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
        unsigned int mask = 0;
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *read_subdev;
        struct comedi_subdevice *write_subdev;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy)
                                break;
                        if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy) {
                                retval = 0;
                                break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
        const unsigned minor = iminor(inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *s = NULL;
        int i;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
 
-       struct comedi_device *dev = dev_file_info->device;
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        return fasync_helper(fd, file, on, &dev->async_queue);
 }
index a8fea9a9173349799c0f2f629d23ce30412cf356..6144afb8cbaaf92a1ca09dfa5404787d72ee35ed 100644 (file)
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
 /*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
 Description: University of Stirling USB DAQ & INCITE Technology Limited
 Devices: [ITL] USB-DUX (usbduxsigma.o)
 Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
 Status: testing
 */
 /*
@@ -44,6 +44,7 @@ Status: testing
  *   0.3: proper vendor ID and driver name
  *   0.4: fixed D/A voltage range
  *   0.5: various bug fixes, health check at startup
+ *   0.6: corrected wrong input range
  */
 
 /* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
 /* comedi constants */
 static const struct comedi_lrange range_usbdux_ai_range = { 1, {
                                                                BIP_RANGE
-                                                               (2.65)
+                                                               (2.65/2.0)
                                                                }
 };
 
index 9e1864c6dfd08caf355f1e0c3aa347d6b0386beb..8190f2aaf53bb88aaea8df5c025d6f83c2ee68d1 100644 (file)
@@ -1,6 +1,7 @@
 config ET131X
        tristate "Agere ET-1310 Gigabit Ethernet support"
-       depends on PCI
+       depends on PCI && NET && NETDEVICES
+       select PHYLIB
        default n
        ---help---
          This driver supports Agere ET-1310 ethernet adapters.
index f5f44a02456fd5aa20ae06b9b524dee1b1c01757..0c1c6ca8c3794de60d440f3dc33bb60f8ebb2e83 100644 (file)
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
+#define ET131X_PM_OPS (&et131x_pm_ops)
+#else
+#define ET131X_PM_OPS NULL
+#endif
+
 /* ISR functions */
 
 /**
@@ -5470,12 +5476,6 @@ err_out:
        return result;
 }
 
-static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
-#define ET131X_PM_OPS (&et131x_pm_ops)
-#else
-#define ET131X_PM_OPS NULL
-#endif
-
 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
        { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
        { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
index 326e967d54ef035258e7d4c325c4d6c26d731edc..aec9311b108c7c73b2b599c42976cbe99e1ca8b2 100644 (file)
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
 
 static int iio_event_getfd(struct iio_dev *indio_dev)
 {
-       if (indio_dev->event_interface == NULL)
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
+       int fd;
+
+       if (ev_int == NULL)
                return -ENODEV;
 
-       mutex_lock(&indio_dev->event_interface->event_list_lock);
-       if (test_and_set_bit(IIO_BUSY_BIT_POS,
-                            &indio_dev->event_interface->flags)) {
-               mutex_unlock(&indio_dev->event_interface->event_list_lock);
+       mutex_lock(&ev_int->event_list_lock);
+       if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+               mutex_unlock(&ev_int->event_list_lock);
                return -EBUSY;
        }
-       mutex_unlock(&indio_dev->event_interface->event_list_lock);
-       return anon_inode_getfd("iio:event",
-                               &iio_event_chrdev_fileops,
-                               indio_dev->event_interface, O_RDONLY);
+       mutex_unlock(&ev_int->event_list_lock);
+       fd = anon_inode_getfd("iio:event",
+                               &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+       if (fd < 0) {
+               mutex_lock(&ev_int->event_list_lock);
+               clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+               mutex_unlock(&ev_int->event_list_lock);
+       }
+       return fd;
 }
 
 static int __init iio_init(void)
index d335c7d6fa0f849ac22dbdbe526948dafb5e872b..828526d4c28985221e6b6ebba019053e1a137d21 100644 (file)
@@ -32,8 +32,8 @@
 #include "as102_fw.h"
 #include "dvbdev.h"
 
-int debug;
-module_param_named(debug, debug, int, 0644);
+int as102_debug;
+module_param_named(debug, as102_debug, int, 0644);
 MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
 
 int dual_tuner;
index bcda635b5a9967454f95c843ccf4ae4c42a22d15..fd33f5a12dcc2167cf9dada508b13521f7159b8f 100644 (file)
@@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver;
 #define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
 #define DRIVER_NAME "as10x_usb"
 
-extern int debug;
+extern int as102_debug;
+#define debug  as102_debug
 
 #define dprintk(debug, args...) \
        do { if (debug) {       \
index b445cd63f901dc4299d55eef54c6675151d8387a..2542c37439049e8e878f29600b8d08c7f6635cd3 100644 (file)
@@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
-                       hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
+                       hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
                        hw_buffer.s.size = fs->size;
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
index fb2e89c3056c0c0c0ff1b1b02ada8f60eab8a551..5385da2e9cdbcdf23186738a787a7b8dba2e553a 100644 (file)
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0DF6, 0x0045)},
        {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
        {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x005D)},
        {USB_DEVICE(0x0DF6, 0x0063)},
        /* Sweex */
        {USB_DEVICE(0x177F, 0x0154)},
index 480b0ed2e4de8975ea315504190c5eff02e5ac59..115635f9502456d7e8e3f16470b7f4a269b23b5b 100644 (file)
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
        th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
        if (IS_ERR(th)) {
                printk(KERN_ERR "Unable to start the device-scanning thread\n");
+               complete(&dev->scanning_done);
                quiesce_and_remove_host(dev);
                err = PTR_ERR(th);
                goto errout;
index 5cde96b2e6e17d10c71f03a1e8e13fcb78179e6a..5c2a15b42dfede39997352cc7289871bd4dfd161 100644 (file)
@@ -1,6 +1,6 @@
 config SLICOSS
        tristate "Alacritech Gigabit IS-NIC support"
-       depends on PCI && X86
+       depends on PCI && X86 && NET
        default n
        help
          This driver supports Alacritech's IS-NIC gigabit ethernet cards.
index 3d1279c424a85155163c498ca463d0838fdfab65..7eb56178fb641d5090c5cbb69c3782112ca9b786 100644 (file)
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS                4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+       int i;
+
        dsp_clock_disable_all(dsp_clocks);
 
+       for (i = 0; i < DM_TIMER_CLOCKS; i++)
+               omap_dm_timer_free(timer[i]);
+
        clk_put(iva2_clk);
        clk_put(ssi.sst_fck);
        clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
        static struct platform_device dspbridge_device;
+       int i, id;
 
        dspbridge_device.dev.bus = &platform_bus_type;
 
+       for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+               timer[i] = omap_dm_timer_request_specific(id);
+
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
                clk_enable(iva2_clk);
                break;
        case GPT_CLK:
-               timer[clk_id - 1] =
-                               omap_dm_timer_request_specific(DMT_ID(clk_id));
+               status = omap_dm_timer_start(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
                clk_disable(iva2_clk);
                break;
        case GPT_CLK:
-               omap_dm_timer_free(timer[clk_id - 1]);
+               status = omap_dm_timer_stop(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
index c43c7e3421c851f48f1942c31cd9fd755ce2b0f3..76cfc6edecd9e26d46f7c3b37fbbf6d81cebdcd3 100644 (file)
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
index 09c44abb89e8239c322bacb1671418dcd4b32e0a..3872b8cccdcf715067f273e091f3fd2cf419d6ac 100644 (file)
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
        spin_lock(&vdev->priv_lock);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
index 0fd96c10271d8364c9b6e69de8dc80ecba567501..8599545cdf9e6d6c90a07510844f6290ab0a1be6 100644 (file)
@@ -614,13 +614,12 @@ int iscsit_add_reject(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        if (add_to_conn) {
                spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
                                " non-existent or non-exported iSCSI LUN:"
                                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
                }
-               if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-                       return iscsit_add_reject_from_cmd(
-                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                                       1, 1, buf, cmd);
-
                send_check_condition = 1;
                goto attach_cmd;
        }
@@ -1044,6 +1037,8 @@ done:
                 */
                send_check_condition = 1;
        } else {
+               cmd->data_length = cmd->se_cmd.data_length;
+
                if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
         * the backend memory allocation.
         */
        ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+       if (ret < 0) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
                dump_immediate_data = 1;
                goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
        if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                }
        }
        hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
        hdr->flags              |= ISCSI_FLAG_CMD_FINAL;
        if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        }
        hdr->response           = cmd->iscsi_response;
        hdr->cmd_status         = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
        hdr                     = (struct iscsi_tm_rsp *) cmd->pdu;
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_TMFUNC_RSP;
+       hdr->flags              = ISCSI_FLAG_CMD_FINAL;
        hdr->response           = iscsit_convert_tcm_tmr_rsp(se_tmr);
        hdr->itt                = cpu_to_be32(cmd->init_task_tag);
        cmd->stat_sn            = conn->stat_sn++;
index beb39469e7f1e4f91c971f6da468c742a338699d..1cd6ce373b83508fd396f82b80290b91372c9e96 100644 (file)
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-       int j = DIV_ROUND_UP(len, 2);
+       int j = DIV_ROUND_UP(len, 2), rc;
 
-       hex2bin(dst, src, j);
+       rc = hex2bin(dst, src, j);
+       if (rc < 0)
+               pr_debug("CHAP string contains non hex digit symbols\n");
 
        dst[j] = '\0';
        return j;
index 3723d90d5ae573db84b658fbd70bf2cd7a82bafa..f1a02dad05a02855b4ef59a6341e4bb61660ef30 100644 (file)
@@ -398,7 +398,6 @@ struct iscsi_cmd {
        u32                     pdu_send_order;
        /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
        u32                     pdu_start;
-       u32                     residual_count;
        /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
        u32                     seq_send_order;
        /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
        atomic_t                connection_exit;
        atomic_t                connection_recovery;
        atomic_t                connection_reinstatement;
-       atomic_t                connection_wait;
        atomic_t                connection_wait_rcfr;
        atomic_t                sleep_on_conn_wait_comp;
        atomic_t                transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
        atomic_t                session_reinstatement;
        atomic_t                session_stop_active;
        atomic_t                sleep_on_sess_wait_comp;
-       atomic_t                transport_wait_cmds;
        /* connection list */
        struct list_head        sess_conn_list;
        struct list_head        cr_active_list;
index c4c68da3e5004b3fa39eeb71829bbefab4e38632..101b1beb3bca205aed7611ec4424f54cc5b20671 100644 (file)
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
                 */
                if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->se_cmd_flags &
-                                       SCF_SCSI_RESERVATION_CONFLICT) {
+                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
index daad362a93cecebeca5c9bcce26233da3b202df1..d734bdec24f9cf2b451a7f905b07fab421a19b3b 100644 (file)
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Could not allocate memory for session\n");
-               return -1;
+               return -ENOMEM;
        }
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
                pr_err("idr_pre_get() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
        idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        return 0;
index 426cd4bf6a9aab344425723cd51766aa932cc536..98936cb7c2947ceb0edbaa41dda91d6641a9b892 100644 (file)
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
                return NULL;
        }
 
-       login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!login->req) {
                pr_err("Unable to allocate memory for Login Request.\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                goto out;
        }
-       memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
        login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
        if (!login->req_buf) {
index 3df1c9b8ae6b7e07118575ebbf24a59124644b01..81d5832fbbd537e7bbffe2c21b1792c1e7a2acde 100644 (file)
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                        scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
                        &tl_cmd->tl_sense_buf[0]);
 
-       /*
-        * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-        */
        if (scsi_bidi_cmnd(sc))
-               se_cmd->t_tasks_bidi = 1;
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
        /*
         * Locate the struct se_lun pointer and attach it to struct se_cmd
         */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
         * Allocate the necessary tasks to complete the received CDB+data
         */
        ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-       if (ret == -ENOMEM) {
-               /* Out of Resources */
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-       } else if (ret == -EINVAL) {
-               /*
-                * Handle case for SAM_STAT_RESERVATION_CONFLICT
-                */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
-               /*
-                * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-                * sense data.
-                */
-               return PYX_TRANSPORT_USE_SENSE_REASON;
-       }
-
+       if (ret != 0)
+               return ret;
        /*
         * For BIDI commands, pass in the extra READ buffer
         * to transport_generic_map_mem_to_cmd() below..
         */
-       if (se_cmd->t_tasks_bidi) {
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                struct scsi_data_buffer *sdb = scsi_in(sc);
 
                sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
        }
 
        /* Tell the core about our preallocated memory */
-       ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+       return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                        scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-       if (ret < 0)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-       return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);
-       int host_no = tl_hba->sh->host_no;
+
+       pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+               tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
         * release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
-
-       pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
-               config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
index 88f2ad43ec8b589922e34de340dac32325080f13..1dcbef499d6a09f2add951dee10c955f3541ccb8 100644 (file)
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        int alua_access_state, primary = 0, rc;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+       if (!l_port) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
        buf = transport_kmap_first_data_page(cmd);
 
        /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!rc) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       rc = -EINVAL;
                        goto out;
                }
                rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * throw an exception with ASCQ: INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
         * struct t10_alua_lu_gp.
         */
        spin_lock(&lu_gps_lock);
-       atomic_set(&lu_gp->lu_gp_shutdown, 1);
        list_del(&lu_gp->lu_gp_node);
        alua_lu_gps_count--;
        spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
        tg_pt_gp_mem->tg_pt = port;
        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-       atomic_set(&port->sep_tg_pt_gp_active, 1);
 
        return tg_pt_gp_mem;
 }
index 683ba02b8247feddd92777fc46824196e5a60180..831468b3163d777f3eb5c982fc05819d37dea3e6 100644 (file)
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        if (cmd->data_length < 60)
                return 0;
 
-       buf[2] = 0x3c;
+       buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
        if (cmd->data_length < 4) {
                pr_err("SCSI Inquiry payload length: %u"
                        " too small for EVPD=1\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
        default:
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       cdb[2] & 0x3f, cdb[3]);
-               return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+               return -EINVAL;
        }
        offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -ENOSYS;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
                                dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("WRITE_SAME emulation not supported"
                                " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
+       struct se_cmd *cmd = task->task_se_cmd;
 
        if (!dev->transport->do_sync_cache) {
                pr_err("SYNCHRONIZE_CACHE emulation not supported"
                        " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        dev->transport->do_sync_cache(task);
index e0c1e8a8dd4e2140e13d7adedb4c84d414b45725..93d4f6a1b7980c597c119ae7f7f0506d57846993 100644 (file)
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
                                " struct se_subsystem_dev\n");
                goto unlock;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
                        " from allocate_virtdevice()\n");
                goto out;
        }
-       spin_lock(&se_device_lock);
-       list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-       spin_unlock(&se_device_lock);
 
        config_group_init_type_name(&se_dev->se_dev_group, name,
                        &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
        mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
-       spin_lock(&se_device_lock);
-       list_del(&se_dev->se_dev_node);
-       spin_unlock(&se_device_lock);
-
        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
index ba5edec2c5f858edaa011a463fae7bb31ad2a008..9b8639425472d8322aab749c6ae03fb56ea2a377 100644 (file)
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
index 67cd6fe05bfa7c751596752da1ff0dd00035908f..b4864fba4ef0d511758916a8debac60ee9f43674 100644 (file)
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
                if (ret > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                   cmd->t_tasks_fua) {
+                   (cmd->se_cmd_flags & SCF_FUA)) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
        }
 
-       if (ret < 0)
+       if (ret < 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
+       }
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     fd_free_task(): (Part of se_subsystem_api_t template)
index 7698efe29262bfd8a7cb521016da0aff1ff0517d..4aa9922044382628fc21e12d8df3ab67840c23ae 100644 (file)
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                    task->task_se_cmd->t_tasks_fua))
+                    (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOSYS;
        }
 
        bio = iblock_get_bio(task, block_lba, sg_num);
-       if (!bio)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       if (!bio) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
+       }
 
        bio_list_init(&list);
        bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
index 5a4ebfc3a54f34791f6df93a36e02d85d752ebf7..95dee7074aeb5eb05f630fdded543e948ac206b3 100644 (file)
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return true;
        }
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obselete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out_unlock;
        }
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
                        /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * Do nothing but return GOOD status.
                 */
                if (!sa_res_key)
-                       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+                       return 0;
 
                if (!spec_i_pt) {
                        /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
                        if (ret != 0) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               return -EINVAL;
                        }
                } else {
                        /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
                                        " 0x%016Lx\n", res_key,
                                        pr_reg->pr_res_key);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
                }
                if (spec_i_pt) {
                        pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
                                " set while sa_res_key=0\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       return -EINVAL;
                }
                /*
                 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
                                " registration exists, but ALL_TG_PT=1 bit not"
                                " present in received PROUT\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_CDB_FIELD;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
                }
                /*
                 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
                                pr_err("Unable to allocate"
                                        " pr_aptpl_buf\n");
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               return -EINVAL;
                        }
                }
                /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
                        if (pr_holder < 0) {
                                kfree(pr_aptpl_buf);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
 
                        spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
 
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
        int prh_type = 0, prh_scope = 0, ret;
 
-       if (!se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&preempt_and_abort_list);
 
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -EINVAL;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " containg a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3553,7 +3615,8 @@ after_iport_check:
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3585,7 +3649,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3603,7 +3668,8 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
                                sa_res_key, 0, aptpl, 2, 1);
                if (ret != 0) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = EINVAL;
                goto out;
        }
 
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * FIXME: A NULL struct se_session pointer means an this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!cmd->se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
        if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
index ed32e1efe42906bbfe91a309889c0acde60a8da5..8b15e56b038461169872d964055316c0318e7e31 100644 (file)
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
                struct bio **hbio)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        u32 task_sg_num = task->task_sg_nents;
        struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
        u32 data_len = task->task_size, i, len, bytes, off;
        int nr_pages = (task->task_size + task_sg[0].offset +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       int nr_vecs = 0, rc;
        int rw = (task->task_data_direction == DMA_TO_DEVICE);
 
        *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return ret;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static int pscsi_do_task(struct se_task *task)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
        struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? IS_ERR(req) : -ENOMEM);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -ENODEV;
                }
        } else {
                BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
                 * Setup the main struct request for the task->task_sg[] payload
                 */
                ret = pscsi_map_sg(task, task->task_sg, &hbio);
-               if (ret < 0)
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               if (ret < 0) {
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return ret;
+               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
                        (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
                        pscsi_req_done);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 /*     pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
                        " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
                        pt->pscsi_result);
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               task->task_se_cmd->scsi_sense_reason =
+                                       TCM_UNSUPPORTED_SCSI_OPCODE;
                transport_complete_task(task, 0);
                break;
        }
index 5158d3846f19cf8f79f69e7efe97f72558b0d413..02e51faa2f4ea168f0a6139c8e303fc9fca81c28 100644 (file)
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-/*     rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 {
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
+       struct scatterlist *rd_sg;
+       struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
+       u32 src_len;
 
        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;
 
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = task->task_sg;
-       sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+       rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
 
-       pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
-               " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       src_offset = rd_offset;
+       pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+                       dev->rd_dev_id, read_rd ? "Read" : "Write",
+                       task->task_lba, req->rd_size, req->rd_page,
+                       rd_offset);
 
+       src_len = PAGE_SIZE - rd_offset;
+       sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (req->rd_size) {
-               if ((sg_d[i].length - dst_offset) <
-                   (sg_s[j].length - src_offset)) {
-                       length = (sg_d[i].length - dst_offset);
-
-                       pr_debug("Step 1 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
-                               sg_s[j].length);
-                       pr_debug("Step 1 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src = sg_virt(&sg_s[j]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst_offset = 0;
-                       src_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_s[j].length - src_offset);
-
-                       pr_debug("Step 2 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset,
-                               j, sg_s[j].length);
-                       pr_debug("Step 2 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       if (sg_d[i].length == length) {
-                               i++;
-                               dst_offset = 0;
-                       } else
-                               dst_offset = length;
-
-                       src = sg_virt(&sg_s[j++]) + src_offset;
-                       BUG_ON(!src);
-
-                       src_offset = 0;
-                       page_end = 1;
-               }
+               u32 len;
+               void *rd_addr;
 
-               memcpy(dst, src, length);
+               sg_miter_next(&m);
+               len = min((u32)m.length, src_len);
+               m.consumed = len;
 
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
+               rd_addr = sg_virt(rd_sg) + rd_offset;
 
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
+               if (read_rd)
+                       memcpy(m.addr, rd_addr, len);
+               else
+                       memcpy(rd_addr, m.addr, len);
 
-               if (!page_end)
+               req->rd_size -= len;
+               if (!req->rd_size)
                        continue;
 
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               src_len -= len;
+               if (src_len) {
+                       rd_offset += len;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
-               table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
-                       return -EINVAL;
-
-               sg_s = &table->sg_table[j = 0];
-       }
-
-       return 0;
-}
-
-/*     rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
-       struct se_task *task = &req->rd_task;
-       struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
-       struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
-       u32 rd_offset = req->rd_offset;
-
-       table = rd_get_sg_table(dev, req->rd_page);
-       if (!table)
-               return -EINVAL;
-
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
-       sg_s = task->task_sg;
-
-       pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
-               " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       dst_offset = rd_offset;
-
-       while (req->rd_size) {
-               if ((sg_s[i].length - src_offset) <
-                   (sg_d[j].length - dst_offset)) {
-                       length = (sg_s[i].length - src_offset);
-
-                       pr_debug("Step 1 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 1 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i++]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst = sg_virt(&sg_d[j]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src_offset = 0;
-                       dst_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_d[j].length - dst_offset);
-
-                       pr_debug("Step 2 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 2 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i]) + src_offset;
-                       BUG_ON(!src);
-
-                       if (sg_s[i].length == length) {
-                               i++;
-                               src_offset = 0;
-                       } else
-                               src_offset = length;
-
-                       dst = sg_virt(&sg_d[j++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       dst_offset = 0;
-                       page_end = 1;
-               }
-
-               memcpy(dst, src, length);
-
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
-
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
-
-               if (!page_end)
-                       continue;
-
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               /* rd page completed, next one please */
+               req->rd_page++;
+               rd_offset = 0;
+               src_len = PAGE_SIZE;
+               if (req->rd_page <= table->page_end_offset) {
+                       rd_sg++;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
                table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
+               if (!table) {
+                       sg_miter_stop(&m);
                        return -EINVAL;
+               }
 
-               sg_d = &table->sg_table[j = 0];
+               /* since we increment, the first sg entry is correct */
+               rd_sg = table->sg_table;
        }
-
+       sg_miter_stop(&m);
        return 0;
 }
 
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct rd_request *req = RD_REQ(task);
-       unsigned long long lba;
+       u64 tmp;
        int ret;
 
-       req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
-       lba = task->task_lba;
-       req->rd_offset = (do_div(lba,
-                         (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
-                          dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+       req->rd_offset = do_div(tmp, PAGE_SIZE);
+       req->rd_page = tmp;
        req->rd_size = task->task_size;
 
-       if (task->task_data_direction == DMA_FROM_DEVICE)
-               ret = rd_MEMCPY_read(req);
-       else
-               ret = rd_MEMCPY_write(req);
-
+       ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
        if (ret != 0)
                return ret;
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
-
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     rd_free_task(): (Part of se_subsystem_api_t template)
index 217e29df62977559d1886320ea54c6dbe7d04fab..684522805a1f370a99a5745c2fa3815a9c722912 100644 (file)
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
                        " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
                        "Preempt" : "", cmd, cmd->t_state,
                        atomic_read(&cmd->t_fe_count));
-               /*
-                * Signal that the command has failed via cmd->se_cmd_flags,
-                */
-               transport_new_cmd_failure(cmd);
 
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
                                atomic_read(&cmd->t_fe_count));
index 3400ae6e93f83d2ae5b25395b97bbd6158877ec0..0257658e2e3ea8a75642ae0dcabc77547ac2379b 100644 (file)
@@ -61,7 +61,6 @@
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_cmd_cache = kmem_cache_create("se_cmd_cache",
-                       sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!se_cmd_cache) {
-               pr_err("kmem_cache_create for struct se_cmd failed\n");
-               goto out;
-       }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out_free_cmd_cache;
+               goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-       kmem_cache_destroy(se_cmd_cache);
 out:
        return -ENOMEM;
 }
@@ -191,7 +182,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_ILLEGAL_REQUEST;
+               task->task_se_cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        }
 
        transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd, 1, 1);
+       transport_generic_request_failure(cmd);
 }
 
 /*     transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-                       cmd->transport_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
+
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
-       atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->ordered_cmd_lock);
-       spin_lock_init(&dev->state_task_lock);
-       spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
-       INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        return -ENOMEM;
                }
        } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0) {
-               cmd->transport_error_status = ret;
-               transport_generic_request_failure(cmd, 0,
-                               (cmd->data_direction != DMA_TO_DEVICE));
-       }
+       if (ret < 0)
+               transport_generic_request_failure(cmd);
+
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-       struct se_cmd *cmd,
-       int complete,
-       int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state,
-               cmd->transport_error_status);
+               cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
 
-       if (complete) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-       }
-
-       switch (cmd->transport_error_status) {
-       case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               break;
-       case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-               cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-               break;
-       case PYX_TRANSPORT_INVALID_CDB_FIELD:
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               break;
-       case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               break;
-       case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-               if (!sc)
-                       transport_new_cmd_failure(cmd);
-               /*
-                * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-                * we force this session to fall back to session
-                * recovery.
-                */
-               cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-               cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-               goto check_stop;
-       case PYX_TRANSPORT_LU_COMM_FAILURE:
-       case PYX_TRANSPORT_ILLEGAL_REQUEST:
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
-       case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               break;
-       case PYX_TRANSPORT_WRITE_PROTECTED:
-               cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+       switch (cmd->scsi_sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+       case TCM_UNSUPPORTED_SCSI_OPCODE:
+       case TCM_INVALID_CDB_FIELD:
+       case TCM_INVALID_PARAMETER_LIST:
+       case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+       case TCM_UNKNOWN_MODE_PAGE:
+       case TCM_WRITE_PROTECTED:
+       case TCM_CHECK_CONDITION_ABORT_CMD:
+       case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+       case TCM_CHECK_CONDITION_NOT_READY:
                break;
-       case PYX_TRANSPORT_RESERVATION_CONFLICT:
+       case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
-       case PYX_TRANSPORT_USE_SENSE_REASON:
-               /*
-                * struct se_cmd->scsi_sense_reason already set
-                */
-               break;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0],
-                       cmd->transport_error_status);
+                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
         * transport_send_check_condition_and_sense() after handling
         * possible unsoliticied write data payloads.
         */
-       if (!sc && !cmd->se_tfo->new_cmd_map)
-               transport_new_cmd_failure(cmd);
-       else {
-               ret = transport_send_check_condition_and_sense(cmd,
-                               cmd->scsi_sense_reason, 0);
-               if (ret == -EAGAIN || ret == -ENOMEM)
-                       goto queue_full;
-       }
+       ret = transport_send_check_condition_and_sense(cmd,
+                       cmd->scsi_sense_reason, 0);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
 
 check_stop:
        transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_inc(&cmd->se_dev->dev_hoq_count);
-               smp_mb__after_atomic_inc();
                pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&cmd->se_dev->ordered_cmd_lock);
-               list_add_tail(&cmd->se_ordered_node,
-                               &cmd->se_dev->ordered_cmd_list);
-               spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
 
-       if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-               transport_generic_request_failure(cmd, 0, 1);
+       if (se_dev_check_online(cmd->se_dev) != 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               transport_generic_request_failure(cmd);
                return 0;
        }
 
@@ -2163,14 +2102,13 @@ check_depth:
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
-               cmd->transport_error_status = error;
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
-               transport_generic_request_failure(cmd, 0, 1);
+               transport_generic_request_failure(cmd);
        }
 
        goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
        return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-       unsigned long flags;
-       /*
-        * Any unsolicited data will get dumped for failed command inside of
-        * the fabric plugin
-        */
-       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-       se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
 
 static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
        return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-       /*
-        * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-        * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-        * CONFLICT STATUS.
-        *
-        * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-        */
-       if (cmd->se_sess &&
-           cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-               core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                       cmd->orig_fe_lun, 0x2C,
-                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-       return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
        return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
         */
        if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
                if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0)
-                       return transport_handle_reservation_conflict(cmd);
+                                       cmd, cdb, pr_reg_type) != 0) {
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EBUSY;
+               }
                /*
                 * This means the CDB is allowed for the SCSI Initiator port
                 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_64(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
-                   !(cmd->t_tasks_bidi))
+                   !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
                         * completion.
                         */
                        cmd->transport_complete_callback = &transport_xor_callback;
-                       cmd->t_tasks_fua = (cdb[10] & 0x8);
+                       if (cdb[1] & 0x8)
+                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_dec(&dev->dev_hoq_count);
-               smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&dev->ordered_cmd_lock);
-               list_del(&cmd->se_ordered_node);
                atomic_dec(&dev->dev_ordered_sync);
                smp_mb__after_atomic_dec();
-               spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
        if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
            (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+               /*
+                * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+                * scatterlists already have been set to follow what the fabric
+                * passes for the original expected data transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                               " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
 
                cmd->t_data_sg = sgl;
                cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       return ret;
+                       goto out_fail;
        }
 
        /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                task_cdbs = transport_allocate_control_task(cmd);
        }
 
-       if (task_cdbs <= 0)
+       if (task_cdbs < 0)
                goto out_fail;
+       else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               cmd->t_state = TRANSPORT_COMPLETE;
+               atomic_set(&cmd->t_transport_active, 1);
+               INIT_WORK(&cmd->work, target_complete_ok_work);
+               queue_work(target_completion_wq, &cmd->work);
+               return 0;
+       }
 
        if (set_counts) {
                atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        else if (ret < 0)
                return ret;
 
-       return PYX_TRANSPORT_WRITE_PENDING;
+       return 1;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        atomic_inc(&cmd->t_transport_aborted);
                        smp_mb__after_atomic_inc();
-                       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-                       transport_new_cmd_failure(cmd);
-                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
 
-       set_user_nice(current, -20);
-
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                               0, (cmd->data_direction !=
-                                                   DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                       0, (cmd->data_direction !=
-                                        DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
+                               break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
index 4fac37c4c615263abbfa60594cfcfc3f1917dbc7..71fc9cea5dc9ba120b725a4e0f6bf94c879d1340 100644 (file)
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+               return -ENOMEM; /* Signal QUEUE_FULL */
 
        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
index 5f770412ca403265ddfc19856ef4934470a25c63..9402b7387cac570d91ff6001a885daed3bbd8b6d 100644 (file)
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
        struct ft_lport_acl *lacl = container_of(wwn,
                                struct ft_lport_acl, fc_lport_wwn);
 
-       pr_debug("del lport %s\n",
-                       config_item_name(&wwn->wwn_group.cg_item));
+       pr_debug("del lport %s\n", lacl->name);
        mutex_lock(&ft_lport_lock);
        list_del(&lacl->list);
        mutex_unlock(&ft_lport_lock);
index 435f6facbc238606feeea4ce26b508ed34ddb6af..44fbebab5075f98f337d880288993b01ad9e7271 100644 (file)
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
 
        asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
                : "=r" (__c));
+       isb();
 
        return __c;
 }
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
        asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
                : /* no output register */
                : "r" (c));
+       isb();
 }
 
 static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
index 5f479dada6f2b25a309b48c50d5b2dee0a35777d..925a1e547a834f10942ab878357a0568254b6ae8 100644 (file)
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
          Support for the IFX6x60 modem devices on Intel MID platforms.
 
 config SERIAL_PCH_UART
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART"
+       tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
        depends on PCI
        select SERIAL_CORE
        help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
          which is an IOH(Input/Output Hub) for x86 embedded processor.
          Enabling PCH_DMA, this PCH UART works as DMA mode.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver also can be used for LAPIS Semiconductor IOH(Input/
+         Output Hub), ML7213, ML7223 and ML7831.
+         ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+         for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+         ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config SERIAL_MSM_SMD
        bool "Enable tty device interface for some SMD ports"
index 4a0f86fa1e90566c03c693c413a581a80e40e7b6..4c823f341d9895cd88a5f0d710f56449714fef86 100644 (file)
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
        if (rs485conf->flags & SER_RS485_ENABLED) {
                dev_dbg(port->dev, "Setting UART to RS485\n");
                atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
-               if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+               if ((rs485conf->delay_rts_after_send) > 0)
                        UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
                mode |= ATMEL_US_USMODE_RS485;
        } else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
 
        if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
                dev_dbg(port->dev, "Setting UART to RS485\n");
-               if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+               if ((atmel_port->rs485.delay_rts_after_send) > 0)
                        UART_PUT_TTGR(port,
                                        atmel_port->rs485.delay_rts_after_send);
                mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
                dev_dbg(port->dev, "Setting UART to RS485\n");
-               if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+               if ((atmel_port->rs485.delay_rts_after_send) > 0)
                        UART_PUT_TTGR(port,
                                        atmel_port->rs485.delay_rts_after_send);
                mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
                rs485conf->delay_rts_after_send = rs485_delay[1];
                rs485conf->flags = 0;
 
-               if (rs485conf->delay_rts_before_send == 0 &&
-                   rs485conf->delay_rts_after_send == 0) {
-                       rs485conf->flags |= SER_RS485_RTS_ON_SEND;
-               } else {
-                       if (rs485conf->delay_rts_before_send)
-                               rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
-                       if (rs485conf->delay_rts_after_send)
-                               rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
-               }
-
                if (of_get_property(np, "rs485-rx-during-tx", NULL))
                        rs485conf->flags |= SER_RS485_RX_DURING_TX;
 
index b7435043f2fe90d87d646141060318851ad12a86..1dfba7b779c84ed1f151b490295b0a99751b7b72 100644 (file)
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
                e100_disable_rx(info);
                e100_enable_rx_irq(info);
 #endif
-               if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
-                       (info->rs485.delay_rts_before_send > 0))
-                               msleep(info->rs485.delay_rts_before_send);
+               if (info->rs485.delay_rts_before_send > 0)
+                       msleep(info->rs485.delay_rts_before_send);
        }
 #endif /* CONFIG_ETRAX_RS485 */
 
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
 
                rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
                rs485data.flags = 0;
-               if (rs485data.delay_rts_before_send != 0)
-                       rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
-               else
-                       rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
 
                if (rs485ctrl.enabled)
                        rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
                /* Set sane defaults */
                info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
                info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
-               info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
                info->rs485.delay_rts_before_send = 0;
                info->rs485.flags &= ~(SER_RS485_ENABLED);
 #endif
index 286c386d9c4677d16cf0effcf9769f34c9249e52..e272d3919c67fdb6b465902fe7a5cb17978cc3a5 100644 (file)
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
 {
        struct uart_hsu_port *up =
                        container_of(port, struct uart_hsu_port, port);
-       struct tty_struct *tty = port->state->port.tty;
        unsigned char cval, fcr = 0;
        unsigned long flags;
        unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
        }
 
        /* CMSPAR isn't supported by this driver */
-       if (tty)
-               tty->termios->c_cflag &= ~CMSPAR;
+       termios->c_cflag &= ~CMSPAR;
 
        if (termios->c_cflag & CSTOPB)
                cval |= UART_LCR_STOP;
index 21febef926aa7c31f09ef26dbbbfa34d78f76df1..d6aba8c087e4784370648976c0d018a569c374f4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  *This program is free software; you can redistribute it and/or modify
  *it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
 
 /* Set the max number of UART port
  * Intel EG20T PCH: 4 port
- * OKI SEMICONDUCTOR ML7213 IOH: 3 port
- * OKI SEMICONDUCTOR ML7223 IOH: 2 port
+ * LAPIS Semiconductor ML7213 IOH: 3 port
+ * LAPIS Semiconductor ML7223 IOH: 2 port
 */
 #define PCH_UART_NR    4
 
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
        pch_ml7213_uart2,
        pch_ml7223_uart0,
        pch_ml7223_uart1,
+       pch_ml7831_uart0,
+       pch_ml7831_uart1,
 };
 
 static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
        [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
        [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
        [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
+       [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
+       [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
 };
 
 static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
                dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
                        __func__);
                dma_release_channel(priv->chan_tx);
+               priv->chan_tx = NULL;
                return;
        }
 
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
                dev_err(priv->port.dev,
                        "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
 
-       if (priv->use_dma_flag)
-               pch_free_dma(port);
+       pch_free_dma(port);
 
        free_irq(priv->port.irq, priv);
 }
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
        if (rtn)
                goto out;
 
+       pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
        /* Don't rewrite B0 */
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
         .driver_data = pch_ml7223_uart0},
        {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
         .driver_data = pch_ml7223_uart1},
+       {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
+        .driver_data = pch_ml7831_uart0},
+       {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
+        .driver_data = pch_ml7831_uart1},
        {0,},
 };
 
index 512c49f98e85a2c59e70b2c454877589756376a2..8e0924f55446f963b8b9d79a0d66347d30c982bc 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <linux/kmod.h>
 #include <linux/nsproxy.h>
+#include <linux/ratelimit.h>
 
 /*
  *     This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
 /**
  *     tty_ldisc_wait_idle     -       wait for the ldisc to become idle
  *     @tty: tty to wait for
+ *     @timeout: for how long to wait at most
  *
  *     Wait for the line discipline to become idle. The discipline must
  *     have been halted for this to guarantee it remains idle.
  */
-static int tty_ldisc_wait_idle(struct tty_struct *tty)
+static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
 {
-       int ret;
+       long ret;
        ret = wait_event_timeout(tty_ldisc_idle,
-                       atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+                       atomic_read(&tty->ldisc->users) == 1, timeout);
        if (ret < 0)
                return ret;
        return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        tty_ldisc_flush_works(tty);
 
-       retval = tty_ldisc_wait_idle(tty);
+       retval = tty_ldisc_wait_idle(tty, 5 * HZ);
 
        tty_lock();
        mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
        if (IS_ERR(ld))
                return -1;
 
-       WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
-
        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
        tty_unlock();
        cancel_work_sync(&tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
-
+retry:
        tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
           it means auditing a lot of other paths so this is
           a FIXME */
        if (tty->ldisc) {       /* Not yet closed */
+               if (atomic_read(&tty->ldisc->users) != 1) {
+                       char cur_n[TASK_COMM_LEN], tty_n[64];
+                       long timeout = 3 * HZ;
+                       tty_unlock();
+
+                       while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
+                               timeout = MAX_SCHEDULE_TIMEOUT;
+                               printk_ratelimited(KERN_WARNING
+                                       "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
+                                       __func__, get_task_comm(cur_n, current),
+                                       tty_name(tty, tty_n));
+                       }
+                       mutex_unlock(&tty->ldisc_mutex);
+                       goto retry;
+               }
+
                if (reset == 0) {
 
                        if (!tty_ldisc_reinit(tty, tty->termios->c_line))
index 6960715c5063ce15d18ad1437af834bdb6be1c20..a8078d0638fa09de0f0c0be43c70dd1d94909ee5 100644 (file)
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
 {
        int i;
 
-       mutex_lock(&open_mutex);
        if (acm->dev) {
                usb_autopm_get_interface(acm->control);
                acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
                acm->control->needs_remote_wakeup = 0;
                usb_autopm_put_interface(acm->control);
        }
-       mutex_unlock(&open_mutex);
 }
 
 static void acm_tty_hangup(struct tty_struct *tty)
 {
        struct acm *acm = tty->driver_data;
        tty_port_hangup(&acm->port);
+       mutex_lock(&open_mutex);
        acm_port_down(acm);
+       mutex_unlock(&open_mutex);
 }
 
 static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
           shutdown */
        if (!acm)
                return;
+
+       mutex_lock(&open_mutex);
        if (tty_port_close_start(&acm->port, tty, filp) == 0) {
-               mutex_lock(&open_mutex);
                if (!acm->dev) {
                        tty_port_tty_set(&acm->port, NULL);
                        acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
        acm_port_down(acm);
        tty_port_close_end(&acm->port, tty);
        tty_port_tty_set(&acm->port, NULL);
+       mutex_unlock(&open_mutex);
 }
 
 static int acm_tty_write(struct tty_struct *tty,
@@ -1456,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
        },
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
+       /* Motorola H24 HSPA module: */
+       { USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+       { USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+       { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
        { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
        .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
                                           data interface instead of
index 96f05b29c9ad23ba6c0874a29dc2066c16806476..79781461eec97ad27c4986178266a63034f4693f 100644 (file)
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
                }
 
+               if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+                               hub_is_superspeed(hub->hdev)) {
+                       need_debounce_delay = true;
+                       clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_BH_PORT_RESET);
+               }
                /* We can forget about a "removed" device when there's a
                 * physical disconnect or the connect status changes.
                 */
index d6a8d8269bfbe6b68648d52068d846208aa9dd05..ecf12e15a7ef48a201c192dbc056b108c2416340 100644 (file)
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Logitech Webcam B/C500 */
        { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech Webcam C600 */
+       { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Webcam Pro 9000 */
        { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech Webcam C905 */
+       { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C210 */
+       { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C260 */
+       { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Webcam C310 */
        { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech Webcam C910 */
+       { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C160 */
+       { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Webcam C270 */
        { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech Quickcam Pro 9000 */
+       { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Quickcam E3500 */
+       { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Quickcam Vision Pro */
+       { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Harmony 700-series */
        { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
 
index 717ebc9ff941808e2ddd52c93db09cc998456565..600d82348511068b141dc65f7a6e7e7c79037a14 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index fa824cfdd2eb42bf0869257303835a4305a16690..25dbd8614e7226ccf915bfa30817226a10272f3c 100644 (file)
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
                        int             ret;
 
                        dep->endpoint.maxpacket = 1024;
+                       dep->endpoint.max_streams = 15;
                        dep->endpoint.ops = &dwc3_gadget_ep_ops;
                        list_add_tail(&dep->endpoint.ep_list,
                                        &dwc->gadget.ep_list);
index b21cd376c11af5978095c97f49bdee67aa1ec6d0..23a447373c51f0c7d3f9a08cadad7a90f217c24a 100644 (file)
@@ -469,7 +469,7 @@ config USB_LANGWELL
           gadget drivers to also be dynamically linked.
 
 config USB_EG20T
-       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
+       tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
        depends on PCI
        select USB_GADGET_DUALSPEED
        help
@@ -485,10 +485,11 @@ config USB_EG20T
          This driver dose not support interrupt transfer or isochronous
          transfer modes.
 
-         This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is
+         This driver also can be used for LAPIS Semiconductor's ML7213 which is
          for IVI(In-Vehicle Infotainment) use.
-         ML7213 is companion chip for Intel Atom E6xx series.
-         ML7213 is completely compatible for Intel EG20T PCH.
+         ML7831 is for general purpose use.
+         ML7213/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7213/ML7831 is completely compatible for Intel EG20T PCH.
 
 config USB_CI13XXX_MSM
        tristate "MIPS USB CI13xxx for MSM"
index 4730016d7cd42d8644e8fb5fa8791fc9df080591..45f422ac103fb61678633ee08241b33b7dfe235a 100644 (file)
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
        u32 tmp;
 
        if (!driver || !bind || !driver->setup
-                       || driver->speed != USB_SPEED_HIGH)
+                       || driver->speed < USB_SPEED_HIGH)
                return -EINVAL;
        if (!dev)
                return -ENODEV;
index 4eedfe557154c285b9625d2117523ea3feef78ea..1fc612914c52e464ec89a82ec9e39048068c0b27 100644 (file)
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
        return platform_driver_register(&ci13xxx_msm_driver);
 }
 module_init(ci13xxx_msm_init);
+
+MODULE_LICENSE("GPL v2");
index 83428f56253bd5283a8962448fa8cd7b899aee96..9a0c3979ff43faa2c54f05efa5fe8c76d8f23dff 100644 (file)
@@ -71,6 +71,9 @@
 /******************************************************************************
  * DEFINE
  *****************************************************************************/
+
+#define DMA_ADDR_INVALID       (~(dma_addr_t)0)
+
 /* ctrl register bank access */
 static DEFINE_SPINLOCK(udc_lock);
 
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
                return -EALREADY;
 
        mReq->req.status = -EALREADY;
-       if (length && !mReq->req.dma) {
+       if (length && mReq->req.dma == DMA_ADDR_INVALID) {
                mReq->req.dma = \
                        dma_map_single(mEp->device, mReq->req.buf,
                                       length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
                                dma_unmap_single(mEp->device, mReq->req.dma,
                                        length, mEp->dir ? DMA_TO_DEVICE :
                                        DMA_FROM_DEVICE);
-                               mReq->req.dma = 0;
+                               mReq->req.dma = DMA_ADDR_INVALID;
                                mReq->map     = 0;
                        }
                        return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
        if (mReq->map) {
                dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
                                 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               mReq->req.dma = 0;
+               mReq->req.dma = DMA_ADDR_INVALID;
                mReq->map     = 0;
        }
 
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
  * @gadget: gadget
  *
  * This function returns an error code
- * Caller must hold lock
  */
 static int _gadget_stop_activity(struct usb_gadget *gadget)
 {
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
        mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
        if (mReq != NULL) {
                INIT_LIST_HEAD(&mReq->queue);
+               mReq->req.dma = DMA_ADDR_INVALID;
 
                mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
                                           &mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
        if (mReq->map) {
                dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
                                 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               mReq->req.dma = 0;
+               mReq->req.dma = DMA_ADDR_INVALID;
                mReq->map     = 0;
        }
        req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
        spin_lock_irqsave(udc->lock, flags);
        if (!udc->remote_wakeup) {
                ret = -EOPNOTSUPP;
-               dbg_trace("remote wakeup feature is not enabled\n");
+               trace("remote wakeup feature is not enabled\n");
                goto out;
        }
        if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
                ret = -EINVAL;
-               dbg_trace("port is not suspended\n");
+               trace("port is not suspended\n");
                goto out;
        }
        hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
                if (udc->udc_driver->notify_event)
                        udc->udc_driver->notify_event(udc,
                        CI13XXX_CONTROLLER_STOPPED_EVENT);
+               spin_unlock_irqrestore(udc->lock, flags);
                _gadget_stop_activity(&udc->gadget);
+               spin_lock_irqsave(udc->lock, flags);
                pm_runtime_put(&udc->gadget.dev);
        }
 
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
        struct ci13xxx *udc;
        int retval = 0;
 
-       trace("%p, %p, %p", dev, regs, name);
+       trace("%p, %p, %p", dev, regs, driver->name);
 
        if (dev == NULL || regs == NULL || driver == NULL ||
                        driver->name == NULL)
index 596a0b464e61f069f93c8278a3c8193ab44a7873..4dff83d2f265235338f3b92f0e5b59690a96de25 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index 52583a2353304acb25119365a5432c916e0aeb73..1a6f415c0d022f34d94e6a609658e47c5a31ed06 100644 (file)
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
                if (ctrl->bRequestType !=
                    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                        break;
-               if (w_index != fsg->interface_number || w_value != 0)
+               if (w_index != fsg->interface_number || w_value != 0 ||
+                               w_length != 0)
                        return -EDOM;
 
                /*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
                if (ctrl->bRequestType !=
                    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                        break;
-               if (w_index != fsg->interface_number || w_value != 0)
+               if (w_index != fsg->interface_number || w_value != 0 ||
+                               w_length != 1)
                        return -EDOM;
                VDBG(fsg, "get max LUN\n");
                *(u8 *)req->buf = fsg->common->nluns - 1;
@@ -2973,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
        fsg_common_put(common);
        usb_free_descriptors(fsg->function.descriptors);
        usb_free_descriptors(fsg->function.hs_descriptors);
+       usb_free_descriptors(fsg->function.ss_descriptors);
        kfree(fsg);
 }
 
index 67b222908cf9fe264a3213b945f0beae5fa83569..3797b3d6c622bd49ff709b33303a86e323294c44 100644 (file)
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
 
 DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
 DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
-DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
 DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
 
 /* B.3.1  Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
        /* .wTotalLength =      DYNAMIC */
 };
 
-/* B.4.3  Embedded MIDI IN Jack Descriptor */
-static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
-       .bLength =            USB_DT_MIDI_IN_SIZE,
-       .bDescriptorType =      USB_DT_CS_INTERFACE,
-       .bDescriptorSubtype =   USB_MS_MIDI_IN_JACK,
-       .bJackType =        USB_MS_EMBEDDED,
-       /* .bJackID =           DYNAMIC */
-};
-
-/* B.4.4  Embedded MIDI OUT Jack Descriptor */
-static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
-       /* .bLength =           DYNAMIC */
-       .bDescriptorType =      USB_DT_CS_INTERFACE,
-       .bDescriptorSubtype =   USB_MS_MIDI_OUT_JACK,
-       .bJackType =            USB_MS_EMBEDDED,
-       /* .bJackID =           DYNAMIC */
-       /* .bNrInputPins =      DYNAMIC */
-       /* .pins =              DYNAMIC */
-};
-
 /* B.5.1  Standard Bulk OUT Endpoint Descriptor */
 static struct usb_endpoint_descriptor bulk_out_desc = {
        .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
 static int __init
 f_midi_bind(struct usb_configuration *c, struct usb_function *f)
 {
-       struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12];
+       struct usb_descriptor_header **midi_function;
        struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
+       struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
        struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
+       struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
        struct usb_composite_dev *cdev = c->cdev;
        struct f_midi *midi = func_to_midi(f);
        int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
                goto fail;
        midi->out_ep->driver_data = cdev;       /* claim */
 
+       /* allocate temporary function list */
+       midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
+                               GFP_KERNEL);
+       if (!midi_function) {
+               status = -ENOMEM;
+               goto fail;
+       }
+
        /*
         * construct the function's descriptor set. As the number of
         * input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
 
        /* calculate the header's wTotalLength */
        n = USB_DT_MS_HEADER_SIZE
-               + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE
-               + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1);
+               + (midi->in_ports + midi->out_ports) *
+                       (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
        ms_header_desc.wTotalLength = cpu_to_le16(n);
 
        midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
 
-       /* we have one embedded IN jack */
-       jack_in_emb_desc.bJackID = jack++;
-       midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
-
-       /* and a dynamic amount of external IN jacks */
-       for (n = 0; n < midi->in_ports; n++) {
-               struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
-
-               ext->bLength =                  USB_DT_MIDI_IN_SIZE;
-               ext->bDescriptorType =          USB_DT_CS_INTERFACE;
-               ext->bDescriptorSubtype =       USB_MS_MIDI_IN_JACK;
-               ext->bJackType =                USB_MS_EXTERNAL;
-               ext->bJackID =                  jack++;
-               ext->iJack =                    0;
-
-               midi_function[i++] = (struct usb_descriptor_header *) ext;
-       }
-
-       /* one embedded OUT jack ... */
-       jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
-       jack_out_emb_desc.bJackID = jack++;
-       jack_out_emb_desc.bNrInputPins = midi->in_ports;
-       /* ... which referencess all external IN jacks */
+       /* configure the external IN jacks, each linked to an embedded OUT jack */
        for (n = 0; n < midi->in_ports; n++) {
-               jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID;
-               jack_out_emb_desc.pins[n].baSourcePin = 1;
+               struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
+               struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
+
+               in_ext->bLength                 = USB_DT_MIDI_IN_SIZE;
+               in_ext->bDescriptorType         = USB_DT_CS_INTERFACE;
+               in_ext->bDescriptorSubtype      = USB_MS_MIDI_IN_JACK;
+               in_ext->bJackType               = USB_MS_EXTERNAL;
+               in_ext->bJackID                 = jack++;
+               in_ext->iJack                   = 0;
+               midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+
+               out_emb->bLength                = USB_DT_MIDI_OUT_SIZE(1);
+               out_emb->bDescriptorType        = USB_DT_CS_INTERFACE;
+               out_emb->bDescriptorSubtype     = USB_MS_MIDI_OUT_JACK;
+               out_emb->bJackType              = USB_MS_EMBEDDED;
+               out_emb->bJackID                = jack++;
+               out_emb->bNrInputPins           = 1;
+               out_emb->pins[0].baSourcePin    = 1;
+               out_emb->pins[0].baSourceID     = in_ext->bJackID;
+               out_emb->iJack                  = 0;
+               midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+
+               /* link it to the endpoint */
+               ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
        }
 
-       midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc;
-
-       /* and multiple external OUT jacks ... */
+       /* configure the external OUT jacks, each linked to an embedded IN jack */
        for (n = 0; n < midi->out_ports; n++) {
-               struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n];
-               int m;
-
-               ext->bLength =                  USB_DT_MIDI_OUT_SIZE(1);
-               ext->bDescriptorType =          USB_DT_CS_INTERFACE;
-               ext->bDescriptorSubtype =       USB_MS_MIDI_OUT_JACK;
-               ext->bJackType =                USB_MS_EXTERNAL;
-               ext->bJackID =                  jack++;
-               ext->bNrInputPins =             1;
-               ext->iJack =                    0;
-               /* ... which all reference the same embedded IN jack */
-               for (m = 0; m < midi->out_ports; m++) {
-                       ext->pins[m].baSourceID =       jack_in_emb_desc.bJackID;
-                       ext->pins[m].baSourcePin =      1;
-               }
-
-               midi_function[i++] = (struct usb_descriptor_header *) ext;
+               struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
+               struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
+
+               in_emb->bLength                 = USB_DT_MIDI_IN_SIZE;
+               in_emb->bDescriptorType         = USB_DT_CS_INTERFACE;
+               in_emb->bDescriptorSubtype      = USB_MS_MIDI_IN_JACK;
+               in_emb->bJackType               = USB_MS_EMBEDDED;
+               in_emb->bJackID                 = jack++;
+               in_emb->iJack                   = 0;
+               midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+
+               out_ext->bLength =              USB_DT_MIDI_OUT_SIZE(1);
+               out_ext->bDescriptorType =      USB_DT_CS_INTERFACE;
+               out_ext->bDescriptorSubtype =   USB_MS_MIDI_OUT_JACK;
+               out_ext->bJackType =            USB_MS_EXTERNAL;
+               out_ext->bJackID =              jack++;
+               out_ext->bNrInputPins =         1;
+               out_ext->iJack =                0;
+               out_ext->pins[0].baSourceID =   in_emb->bJackID;
+               out_ext->pins[0].baSourcePin =  1;
+               midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+
+               /* link it to the endpoint */
+               ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
        }
 
        /* configure the endpoint descriptors ... */
        ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
        ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
-       for (n = 0; n < midi->in_ports; n++)
-               ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
 
        ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
        ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
-       for (n = 0; n < midi->out_ports; n++)
-               ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
 
        /* ... and add them to the list */
        midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
                f->descriptors = usb_copy_descriptors(midi_function);
        }
 
+       kfree(midi_function);
+
        return 0;
 
 fail:
index 3490770333383d3d63c08977901b9311ff7cef15..7cdcb63b21ff6b3605591d87de5d413bc470c61e 100644 (file)
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
 static int
 pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 {
-       struct net_device *dev = fp->dev;
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 
        err = usb_ep_queue(fp->out_ep, req, gfp_flags);
        if (unlikely(err))
-               netdev_free_page(dev, page);
+               put_page(page);
        return err;
 }
 
@@ -346,7 +345,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
                }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                               skb->len == 0, req->actual);
+                               skb->len <= 1, req->actual);
                page = NULL;
 
                if (req->actual < req->length) { /* Last fragment */
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
        }
 
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               pn_rx_submit(fp, req, GFP_ATOMIC);
+               pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
                        netif_carrier_on(dev);
                        for (i = 0; i < phonet_rxq_size; i++)
-                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
                }
                spin_unlock(&port->lock);
                return 0;
index 91fdf790ed20b122bf0a13df0d3c8aed5285ac3c..cf33a8d0fd5df46ec339f5b481b68dc61121243a 100644 (file)
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
        }
        if (!gser->port.in->desc || !gser->port.out->desc) {
                DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-               if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
-                   !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+               if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+                   config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
                        gser->port.in->desc = NULL;
                        gser->port.out->desc = NULL;
                        return -EINVAL;
index f7e39b0365cee5de76bc66d1942eeb2e322d6123..11b5196284aed6253180b699094cd7b6dbc762ea 100644 (file)
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
                        if (ctrl->bRequestType != (USB_DIR_OUT |
                                        USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                                break;
-                       if (w_index != 0 || w_value != 0) {
+                       if (w_index != 0 || w_value != 0 || w_length != 0) {
                                value = -EDOM;
                                break;
                        }
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
                        if (ctrl->bRequestType != (USB_DIR_IN |
                                        USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                                break;
-                       if (w_index != 0 || w_value != 0) {
+                       if (w_index != 0 || w_value != 0 || w_length != 1) {
                                value = -EDOM;
                                break;
                        }
index 43a49ecc1f36ed1009eb836350bd07800a4d8c09..dcbc0a2e48dde8be9b27a5c429a4c004bfb8aa9c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/fsl_devices.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 
@@ -88,7 +89,6 @@ eenahb:
 void fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
        if (cpu_is_mx35()) {
                unsigned int v;
 
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
                                        USBPHYCTRL_OTGBASE_OFFSET));
                }
        }
-#endif
 
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
index 2a03e4de11c1a277cfe27b670ac48f11cabc006a..e00cf92409ce5114f061801c3d3e29b6b6bae23e 100644 (file)
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                       && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index b2c44e1d58134c185966e672508ec0a9f8d24d84..dd28ef3def71f394f281b70f8ee2335b629f42e6 100644 (file)
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
                kfree(req);
 }
 
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+       struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+       /* Write dQH next pointer and terminate bit to 0 */
+       qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+                       & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+       /* Clear active and halt bit */
+       qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+                                       | EP_QUEUE_HEAD_STATUS_HALT));
+
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+
+       /* Prime endpoint by writing correct bit to ENDPTPRIME */
+       fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+                       : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
 static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
 {
-       int i = ep_index(ep) * 2 + ep_is_in(ep);
        u32 temp, bitmask, tmp_stat;
-       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
 
        /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
        VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                        cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
                /* Read prime bit, if 1 goto done */
                if (fsl_readl(&dr_regs->endpointprime) & bitmask)
-                       goto out;
+                       return;
 
                do {
                        /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
 
                if (tmp_stat)
-                       goto out;
+                       return;
        }
 
-       /* Write dQH next pointer and terminate bit to 0 */
-       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-       dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
-       /* Clear active and halt bit */
-       temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
-                       | EP_QUEUE_HEAD_STATUS_HALT));
-       dQH->size_ioc_int_sts &= temp;
-
-       /* Ensure that updates to the QH will occur before priming. */
-       wmb();
-
-       /* Prime endpoint by writing 1 to ENDPTPRIME */
-       temp = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-       fsl_writel(temp, &dr_regs->endpointprime);
-out:
-       return;
+       fsl_prime_ep(ep, req->head);
 }
 
 /* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
                VDBG("%s, bad ep", __func__);
                return -EINVAL;
        }
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                if (req->req.length > ep->ep.maxpacket)
                        return -EMSGSIZE;
        }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
                /* The request isn't the last request in this ep queue */
                if (req->queue.next != &ep->queue) {
-                       struct ep_queue_head *qh;
                        struct fsl_req *next_req;
 
-                       qh = ep->qh;
                        next_req = list_entry(req->queue.next, struct fsl_req,
                                        queue);
 
-                       /* Point the QH to the first TD of next request */
-                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+                       /* prime with dTD of next request */
+                       fsl_prime_ep(ep, next_req->head);
                }
-
-               /* The request hasn't been processed, patch up the TD chain */
+       /* The request hasn't been processed, patch up the TD chain */
        } else {
                struct fsl_req *prev_req;
 
                prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
-               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
-                               &prev_req->tail->next_td_ptr);
-
+               prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
        }
 
        done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
                goto out;
        }
 
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                status = -EOPNOTSUPP;
                goto out;
        }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        struct fsl_udc *udc;
        int size = 0;
        u32 bitmask;
-       struct ep_queue_head *d_qh;
+       struct ep_queue_head *qh;
 
        ep = container_of(_ep, struct fsl_ep, ep);
        if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
 
-       d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+       qh = get_qh_by_ep(ep);
 
        bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
            (1 << (ep_index(ep)));
 
        if (fsl_readl(&dr_regs->endptstatus) & bitmask)
-               size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+               size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
                    >> DTD_LENGTH_BIT_POS;
 
        pr_debug("%s %u\n", __func__, size);
@@ -1717,7 +1713,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
 
 static inline enum usb_device_speed portscx_device_speed(u32 reg)
 {
-       switch (speed & PORTSCX_PORT_SPEED_MASK) {
+       switch (reg & PORTSCX_PORT_SPEED_MASK) {
        case PORTSCX_PORT_SPEED_HIGH:
                return USB_SPEED_HIGH;
        case PORTSCX_PORT_SPEED_FULL:
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                               && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
@@ -2480,8 +2475,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
 
 #ifndef CONFIG_ARCH_MXC
        if (pdata->have_sysif_regs)
-               usb_sys_regs = (struct usb_sys_interface *)
-                               ((u32)dr_regs + USB_DR_SYS_OFFSET);
+               usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
 #endif
 
        /* Initialize USB clocks */
index 1d51be83fda87402d4a77a28ff4fa636d17bff62..f781f5dec41776629584a33a1be5817d8e6778d6 100644 (file)
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
                                        * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)     (ep_index(EP) * 2 + ep_is_in(EP))
 
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+       /* we only have one ep0 structure but two queue heads */
+       if (ep_index(ep) != 0)
+               return ep->qh;
+       else
+               return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+                               USB_DIR_IN) ? 1 : 0];
+}
+
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
index a392ec0d2d5179beebf3923dc6b275001b4961c5..6ccae2707e596c3f24d5705495264f0dcfa4f31b 100644 (file)
@@ -1730,8 +1730,9 @@ static void
 gadgetfs_disconnect (struct usb_gadget *gadget)
 {
        struct dev_data         *dev = get_gadget_data (gadget);
+       unsigned long           flags;
 
-       spin_lock (&dev->lock);
+       spin_lock_irqsave (&dev->lock, flags);
        if (dev->state == STATE_DEV_UNCONNECTED)
                goto exit;
        dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
        next_event (dev, GADGETFS_DISCONNECT);
        ep0_readable (dev);
 exit:
-       spin_unlock (&dev->lock);
+       spin_unlock_irqrestore (&dev->lock, flags);
 }
 
 static void
index 91d0af2a24a8537728fb5c5cb176ff0108095ab5..9aa1cbbee45b64597ef9bcc44d5ba5ed219b2d6a 100644 (file)
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
        int retval;
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !bind
                        || !driver->setup)
                return -EINVAL;
index 7f1bc9a73cda5a1fe45f8c053f426407758e0f50..da2b9d0be3ca0d444d9df8ca5f0b7a9800c3d078 100644 (file)
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
         * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
         * "must not be used in normal operation"
         */
-       if (!driver || driver->speed != USB_SPEED_HIGH
+       if (!driver || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
 
index 550d6dcdf10454253cdb3d93f6a7d86219ce4a50..5048a0c07640a4ed649c848a43ae4b83c5298be4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
 #define PCI_DEVICE_ID_INTEL_EG20T_UDC  0x8808
 #define PCI_VENDOR_ID_ROHM             0x10DB
 #define PCI_DEVICE_ID_ML7213_IOH_UDC   0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC   0x8808
 
 static const char      ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);    /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
                .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
                .class_mask = 0xffffffff,
        },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+               .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+               .class_mask = 0xffffffff,
+       },
        { 0 },
 };
 
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
 module_exit(pch_udc_pci_exit);
 
 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
-MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
 MODULE_LICENSE("GPL");
index 68a826a1b866e8a9191711da9c5e0da40eaf2e22..fc719a3f855717b88417aaff8c2e89d9a2723109 100644 (file)
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
        if (list_empty(&ep->queue) && !ep->busy) {
                pipe_stop(ep->r8a66597, ep->pipenum);
                r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
+               r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
+               r8a66597_write(ep->r8a66597, 0, ep->pipectr);
        }
        spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
 }
@@ -1742,26 +1744,16 @@ static int r8a66597_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
-       int retval;
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
        if (!r8a66597)
                return -ENODEV;
 
        /* hook up the driver */
-       driver->driver.bus = NULL;
        r8a66597->driver = driver;
-       r8a66597->gadget.dev.driver = &driver->driver;
-
-       retval = device_add(&r8a66597->gadget.dev);
-       if (retval) {
-               dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
-                       retval);
-               goto error;
-       }
 
        init_controller(r8a66597);
        r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
        }
 
        return 0;
-
-error:
-       r8a66597->driver = NULL;
-       r8a66597->gadget.dev.driver = NULL;
-
-       return retval;
 }
 
 static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
        disable_controller(r8a66597);
        spin_unlock_irqrestore(&r8a66597->lock, flags);
 
-       device_del(&r8a66597->gadget.dev);
        r8a66597->driver = NULL;
        return 0;
 }
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
                clk_put(r8a66597->clk);
        }
 #endif
+       device_unregister(&r8a66597->gadget.dev);
        kfree(r8a66597);
        return 0;
 }
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
        r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
 
        r8a66597->gadget.ops = &r8a66597_gadget_ops;
-       device_initialize(&r8a66597->gadget.dev);
        dev_set_name(&r8a66597->gadget.dev, "gadget");
        r8a66597->gadget.is_dualspeed = 1;
        r8a66597->gadget.dev.parent = &pdev->dev;
        r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
        r8a66597->gadget.dev.release = pdev->dev.release;
        r8a66597->gadget.name = udc_name;
+       ret = device_register(&r8a66597->gadget.dev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "device_register failed\n");
+               goto clean_up;
+       }
 
        init_timer(&r8a66597->timer);
        r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
                                clk_name);
                        ret = PTR_ERR(r8a66597->clk);
-                       goto clean_up;
+                       goto clean_up_dev;
                }
                clk_enable(r8a66597->clk);
        }
@@ -2014,7 +2004,9 @@ clean_up2:
                clk_disable(r8a66597->clk);
                clk_put(r8a66597->clk);
        }
+clean_up_dev:
 #endif
+       device_unregister(&r8a66597->gadget.dev);
 clean_up:
        if (r8a66597) {
                if (r8a66597->sudmac_reg)
index a552453dc94632cacee3ea5212f60aa4563fa519..b31448229f0b26c3a38014cf59e3339060253cb3 100644 (file)
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
                return -EINVAL;
        }
 
-       if (driver->speed != USB_SPEED_HIGH &&
-           driver->speed != USB_SPEED_FULL) {
+       if (driver->speed < USB_SPEED_FULL)
                dev_err(hsotg->dev, "%s: bad speed\n", __func__);
-       }
 
        if (!bind || !driver->setup) {
                dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
index 8d54f893cefe9df7c97363c1812b4f30bb822ff7..20a553b46aedc1d17ecaeaf033ff62eb8f5f472c 100644 (file)
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
        int ret;
 
        if (!driver
-               || (driver->speed != USB_SPEED_FULL &&
-                       driver->speed != USB_SPEED_HIGH)
+               || driver->speed < USB_SPEED_FULL
                || !bind
                || !driver->unbind || !driver->disconnect || !driver->setup)
                return -EINVAL;
index 022baeca7c94f4f55819d6cf2be5550752e3318a..6939e17f4580099af82d236c413547fe3660a18d 100644 (file)
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 
        if (udc_is_newstyle(udc)) {
-               usb_gadget_disconnect(udc->gadget);
+               udc->driver->disconnect(udc->gadget);
                udc->driver->unbind(udc->gadget);
                usb_gadget_udc_stop(udc->gadget, udc->driver);
-
+               usb_gadget_disconnect(udc->gadget);
        } else {
                usb_gadget_stop(udc->gadget, udc->driver);
        }
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
 static ssize_t usb_udc_srp_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
-       struct usb_udc          *udc = dev_get_drvdata(dev);
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
 
        if (sysfs_streq(buf, "1"))
                usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        usb_speed_string(udc->gadget->speed));
 }
-static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
 
 #define USB_UDC_ATTR(name)                                     \
 ssize_t usb_udc_##name##_show(struct device *dev,              \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev,           \
                                                                \
        return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name);  \
 }                                                              \
-static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL)
+static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
 
 static USB_UDC_ATTR(is_dualspeed);
 static USB_UDC_ATTR(is_otg);
index 2e829fae648291d1fc885653e274b7e891d4e09e..a60679cbbf858e3c97a978218e16d931635ae1a3 100644 (file)
@@ -1475,30 +1475,36 @@ iso_stream_schedule (
         * jump until after the queue is primed.
         */
        else {
+               int done = 0;
                start = SCHEDULE_SLOP + (now & ~0x07);
 
                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
 
-               /* find a uframe slot with enough bandwidth */
-               next = start + period;
-               for (; start < next; start++) {
-
+               /* find a uframe slot with enough bandwidth.
+                * Early uframes are more precious because full-speed
+                * iso IN transfers can't use late uframes,
+                * and therefore they should be allocated last.
+                */
+               next = start;
+               start += period;
+               do {
+                       start--;
                        /* check schedule: enough space? */
                        if (stream->highspeed) {
                                if (itd_slot_ok(ehci, mod, start,
                                                stream->usecs, period))
-                                       break;
+                                       done = 1;
                        } else {
                                if ((start % 8) >= 6)
                                        continue;
                                if (sitd_slot_ok(ehci, mod, stream,
                                                start, sched, period))
-                                       break;
+                                       done = 1;
                        }
-               }
+               } while (start > next && !done);
 
                /* no room in the schedule */
-               if (start == next) {
+               if (!done) {
                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
                                urb, now, now + mod);
                        status = -ENOSPC;
index fe74bd6760187c221e79dae448005c07a25863be..b4fb511d24bcda76a4101aa605e01384c5962d21 100644 (file)
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
 
        ehci->caps = hcd->regs;
        ehci->regs = hcd->regs +
-               HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+               HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
        dbg_hcs_params(ehci, "reset");
        dbg_hcc_params(ehci, "reset");
 
index a7dc1e1d45f2a77e1cfce4de339b449b8863f778..2ac4ac2e4ef95208be6d1ae2b42af1840cb42bb9 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index ba3a46b78b75a7bd16e54e7e9d3690dd5f9df4ef..95a9fec38e89b4e8a6daff8a3c0c75fd73315d4c 100644 (file)
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
        if (port < 0 || port >= 2)
                return;
 
+       if (pdata->vbus_pin[port] <= 0)
+               return;
+
        gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
 }
 
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
        if (port < 0 || port >= 2)
                return -EINVAL;
 
+       if (pdata->vbus_pin[port] <= 0)
+               return -EINVAL;
+
        return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
 }
 
index 34efd479e068cbd8133fbd98f8c2a2cd47efa862..b2639191549e88172e803281a6c53300bd103b67 100644 (file)
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
        struct ohci_hcd *ohci;
 
        ohci = hcd_to_ohci (hcd);
-       ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
-       ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+       ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
 
-       /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
-       ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
-                       OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
-                       OHCI_CTRL_RWC);
-       ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+       /* Software reset, after which the controller goes into SUSPEND */
+       ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+       ohci_readl(ohci, &ohci->regs->cmdstatus);       /* flush the writes */
+       udelay(10);
 
-       /* flush the writes */
-       (void) ohci_readl (ohci, &ohci->regs->control);
+       ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
 }
 
 static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
index ad8166c681e2894424b81850aa3a366108d8d11c..bc01b064585ac9da1d232106586d71937b78db66 100644 (file)
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
        return 0;
 }
 
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power.  This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
-       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
-       /* Evidently nVidia fixed their later hardware; this is a guess at
-        * the changeover point.
-        */
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB          0x026d
-
-       if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
-               ohci->flags |= OHCI_QUIRK_SHUTDOWN;
-               ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
-       }
-
-       return 0;
-}
-
 static void sb800_prefetch(struct ohci_hcd *ohci, int on)
 {
        struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
                PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
                .driver_data = (unsigned long)ohci_quirk_amd700,
        },
-       {
-               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
-               .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
-       },
 
        /* FIXME for some of the early AMD 760 southbridges, OHCI
         * won't work at all.  blacklist them.
index 35e5fd640ce75388c7f373544c62e965f34c0111..0795b934d00c9709800948c137a1aa84fd96e47d 100644 (file)
@@ -403,7 +403,6 @@ struct ohci_hcd {
 #define        OHCI_QUIRK_HUB_POWER    0x100                   /* distrust firmware power/oc setup */
 #define        OHCI_QUIRK_AMD_PLL      0x200                   /* AMD PLL quirk*/
 #define        OHCI_QUIRK_AMD_PREFETCH 0x400                   /* pre-fetch for ISO transfer */
-#define        OHCI_QUIRK_SHUTDOWN     0x800                   /* nVidia power bug */
        // there are also chip quirks/bugs in init logic
 
        struct work_struct      nec_work;       /* Worker for NEC quirk */
index 27a3dec32fa2f78895175f32a89f5e8f0270f7cc..caf87428ca43c3f38985d9469b566645ea3d3bf9 100644 (file)
@@ -37,6 +37,7 @@
 #define OHCI_INTRENABLE                0x10
 #define OHCI_INTRDISABLE       0x14
 #define OHCI_FMINTERVAL                0x34
+#define OHCI_HCFS              (3 << 6)        /* hc functional state */
 #define OHCI_HCR               (1 << 0)        /* host controller reset */
 #define OHCI_OCR               (1 << 3)        /* ownership change request */
 #define OHCI_CTRL_RWC          (1 << 9)        /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
 {
        void __iomem *base;
        u32 control;
+       u32 fminterval;
+       int cnt;
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
        }
 #endif
 
-       /* reset controller, preserving RWC (and possibly IR) */
-       writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
-       readl(base + OHCI_CONTROL);
+       /* disable interrupts */
+       writel((u32) ~0, base + OHCI_INTRDISABLE);
 
-       /* Some NVIDIA controllers stop working if kept in RESET for too long */
-       if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
-               u32 fminterval;
-               int cnt;
+       /* Reset the USB bus, if the controller isn't already in RESET */
+       if (control & OHCI_HCFS) {
+               /* Go into RESET, preserving RWC (and possibly IR) */
+               writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+               readl(base + OHCI_CONTROL);
 
-               /* drive reset for at least 50 ms (7.1.7.5) */
+               /* drive bus reset for at least 50 ms (7.1.7.5) */
                msleep(50);
+       }
 
-               /* software reset of the controller, preserving HcFmInterval */
-               fminterval = readl(base + OHCI_FMINTERVAL);
-               writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+       /* software reset of the controller, preserving HcFmInterval */
+       fminterval = readl(base + OHCI_FMINTERVAL);
+       writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 
-               /* reset requires max 10 us delay */
-               for (cnt = 30; cnt > 0; --cnt) {        /* ... allow extra time */
-                       if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
-                               break;
-                       udelay(1);
-               }
-               writel(fminterval, base + OHCI_FMINTERVAL);
-
-               /* Now we're in the SUSPEND state with all devices reset
-                * and wakeups and interrupts disabled
-                */
+       /* reset requires max 10 us delay */
+       for (cnt = 30; cnt > 0; --cnt) {        /* ... allow extra time */
+               if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+                       break;
+               udelay(1);
        }
+       writel(fminterval, base + OHCI_FMINTERVAL);
 
-       /*
-        * disable interrupts
-        */
-       writel(~(u32)0, base + OHCI_INTRDISABLE);
-       writel(~(u32)0, base + OHCI_INTRSTATUS);
-
+       /* Now the controller is safely in SUSPEND and nothing can wake it up */
        iounmap(base);
 }
 
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
        void __iomem *base, *op_reg_base;
        u32     hcc_params, cap, val;
        u8      offset, cap_length;
-       int     wait_time, delta, count = 256/4;
+       int     wait_time, count = 256/4;
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
                writel(val, op_reg_base + EHCI_USBCMD);
 
                wait_time = 2000;
-               delta = 100;
                do {
                        writel(0x3f, op_reg_base + EHCI_USBSTS);
-                       udelay(delta);
-                       wait_time -= delta;
+                       udelay(100);
+                       wait_time -= 100;
                        val = readl(op_reg_base + EHCI_USBSTS);
                        if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
                                break;
index d6e175428618d4b31f69bd3d323e7c5aef93deae..a403b53e86b9fd3dcc742b9da5759330fbc4d212 100644 (file)
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
 {
        qset->td_start = qset->td_end = qset->ntds = 0;
 
-       qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+       qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
index 42a22b8e692262fde4eab6b2133fc6cf485555a7..0e4b25fa3bcd262a898d6337aeeb9b6cec7b5600 100644 (file)
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx      *ep0_ctx;
        struct xhci_slot_ctx    *slot_ctx;
-       struct xhci_input_control_ctx *ctrl_ctx;
        u32                     port_num;
        struct usb_device *top_dev;
 
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
-       /* 2) New slot context and endpoint 0 context are valid*/
-       ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
        switch (udev->speed) {
index 940321b3ec68b477549a38d4016ee047fb7a72a7..9f1d4b15d818553fc488c8651a14d0e4b3301f66 100644 (file)
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        struct xhci_ring *ring;
        struct xhci_td *cur_td;
        int ret, i, j;
+       unsigned long flags;
 
        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;
 
-       spin_lock(&xhci->lock);
+       spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.\n");
-               spin_unlock(&xhci->lock);
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
                                "exiting.\n");
-               spin_unlock(&xhci->lock);
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
 
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
-       spin_unlock(&xhci->lock);
+       spin_unlock_irqrestore(&xhci->lock, flags);
 
        ret = xhci_halt(xhci);
 
-       spin_lock(&xhci->lock);
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and it's
                 * not allowing itself to be halted.  At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
                        }
                }
        }
-       spin_unlock(&xhci->lock);
+       spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg(xhci, "Calling usb_hc_died()\n");
        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
        xhci_dbg(xhci, "xHCI host controller is dead.\n");
index 1ff95a0df576762000796440a9b82aae53832718..a1afb7c39f7e70c36c12a8bccdc0a0b64b7f29d3 100644 (file)
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
-               memset(seg->trbs, 0, SEGMENT_SIZE);
+               memset(seg->trbs, 0,
+                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+               seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);
 
@@ -799,7 +802,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
-       int                     retval;
+       int                     retval = 0;
 
        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
@@ -809,6 +812,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                                xhci->bus_state[1].next_statechange))
                msleep(100);
 
+       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+       set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;
@@ -878,20 +884,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                        return retval;
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
-               if (retval)
-                       goto failed_restart;
-
-               xhci_dbg(xhci, "Start the secondary HCD\n");
-               retval = xhci_run(secondary_hcd);
                if (!retval) {
-                       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-                       set_bit(HCD_FLAG_HW_ACCESSIBLE,
-                                       &xhci->shared_hcd->flags);
+                       xhci_dbg(xhci, "Start the secondary HCD\n");
+                       retval = xhci_run(secondary_hcd);
                }
-failed_restart:
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
-               return retval;
+               goto done;
        }
 
        /* step 4: set Run/Stop bit */
@@ -910,11 +909,14 @@ failed_restart:
         * Running endpoints by ringing their doorbells
         */
 
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
-
        spin_unlock_irq(&xhci->lock);
-       return 0;
+
+ done:
+       if (retval == 0) {
+               usb_hcd_resume_root_hub(hcd);
+               usb_hcd_resume_root_hub(xhci->shared_hcd);
+       }
+       return retval;
 }
 #endif /* CONFIG_PM */
 
@@ -3504,6 +3506,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        /* Otherwise, update the control endpoint ring enqueue pointer. */
        else
                xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+       ctrl_ctx->drop_flags = 0;
+
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
@@ -3585,7 +3591,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
                + 1;
        /* Zero the input context control for later use */
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags = 0;
        ctrl_ctx->drop_flags = 0;
 
index fc34b8b1191055bcb568a171a518d389b4266f25..07a03460a598c3bcea675494a2a9009312b5d57b 100644 (file)
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
        select TWL4030_USB if MACH_OMAP_3430SDP
        select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
        select USB_OTG_UTILS
+       select USB_GADGET_DUALSPEED
        tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
        help
          Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
 
 config USB_MUSB_UX500
        tristate "U8500 and U5500"
-       depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500)
+       depends on (ARCH_U8500 && AB8500_USB)
 
 endchoice
 
index 08f1d0b662a37b64558e70777bf13f609da1bb47..e233d2b7d335713a3af128258f8634014aa3c5fe 100644 (file)
@@ -27,6 +27,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
index 4da7492ddbdba25768f4acbce91c1802195f48da..2613bfdb09b65da2ecb274edbf160b237bfde066 100644 (file)
@@ -27,6 +27,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
index 20a28731c338e8aa9dbf2bac295ce8ddda426e3a..b63ab1570103f2219afc5b44bdea08acd0531054 100644 (file)
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
-       defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
-       defined(CONFIG_ARCH_U5500)
+       defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
 
 static irqreturn_t generic_interrupt(int irq, void *__hci)
 {
@@ -2302,18 +2301,12 @@ static int musb_suspend(struct device *dev)
                 */
        }
 
-       musb_save_context(musb);
-
        spin_unlock_irqrestore(&musb->lock, flags);
        return 0;
 }
 
 static int musb_resume_noirq(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-
-       musb_restore_context(musb);
-
        /* for static cmos like DaVinci, register values were preserved
         * unless for some reason the whole soc powered down or the USB
         * module got reset through the PSC (vs just being disabled).
index ae4a20acef6c6e1265f2666d00c09cdee4d19032..922148ff8d2969de64a808046f7ce06c6115ace1 100644 (file)
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
        unsigned long           flags;
        int                     retval = -EINVAL;
 
-       if (driver->speed != USB_SPEED_HIGH)
+       if (driver->speed < USB_SPEED_HIGH)
                goto err0;
 
        pm_runtime_get_sync(musb->controller);
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
                                        nuke(&hw_ep->ep_out, -ESHUTDOWN);
                        }
                }
-
-               spin_unlock(&musb->lock);
-               driver->disconnect(&musb->g);
-               spin_lock(&musb->lock);
        }
 }
 
index 60ddba8066ea201181b0b6ce3c69a847eafc618a..79cb0af779fa07dac0702ee9b2b2d43e0a22b738 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
index d2e2efaba658c6c20c485e2de2d839b8a926d005..08c679c0dde5ae110a56ec70af33819fdd20cfeb 100644 (file)
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
 /*
  *             platform functions
  */
-static int __devinit usbhs_probe(struct platform_device *pdev)
+static int usbhs_probe(struct platform_device *pdev)
 {
        struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
        struct renesas_usbhs_driver_callback *dfunc;
index 8da685e796d1484cadbb3bcda67a5ee26ac64c44..ffdf5d15085ebbe845dbd54474c8a0e5b9464061 100644 (file)
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
        if (len % 4) /* 32bit alignment */
                goto usbhsf_pio_prepare_push;
 
-       if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+       if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;
 
        /* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
        if (!fifo)
                goto usbhsf_pio_prepare_pop;
 
-       if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+       if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_pop;
 
        ret = usbhsf_fifo_select(pipe, fifo, 0);
index 053f86d70009f4a5616c2b303c72c4988fd12d74..ad96a38967299f4895b74ae0fa7bb416384706f8 100644 (file)
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
                if (mod->irq_attch)
                        intenb1 |= ATTCHE;
 
-               if (mod->irq_attch)
+               if (mod->irq_dtch)
                        intenb1 |= DTCHE;
 
                if (mod->irq_sign)
index 8ae3733031cdb8fa17ec146d31e5d42e9eea120a..6c6875533f019096af0518e6c92d9f430c4edc49 100644 (file)
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
  */
 #if    defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
        defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
-extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv);
-extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
+extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
 #else
 static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
 {
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
 
 #if    defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
        defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
-extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv);
-extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
+extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
 #else
 static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 {
index 4cc7ee0babc60441914ac69ca20a2fe1d857c5e6..7f4e803385702499b70e4a40fce377bc28a1e45f 100644 (file)
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
-       int ret;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->setup      ||
-           driver->speed != USB_SPEED_HIGH)
+           driver->speed < USB_SPEED_FULL)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        /* first hook up the driver ... */
        gpriv->driver = driver;
        gpriv->gadget.dev.driver = &driver->driver;
 
-       ret = device_add(&gpriv->gadget.dev);
-       if (ret) {
-               dev_err(dev, "device_add error %d\n", ret);
-               goto add_fail;
-       }
-
        return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
-       gpriv->driver = NULL;
-       gpriv->gadget.dev.driver = NULL;
-
-       return ret;
 }
 
 static int usbhsg_gadget_stop(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->unbind)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
-       device_del(&gpriv->gadget.dev);
+       gpriv->gadget.dev.driver = NULL;
        gpriv->driver = NULL;
 
        return 0;
@@ -827,10 +806,17 @@ static int usbhsg_start(struct usbhs_priv *priv)
 
 static int usbhsg_stop(struct usbhs_priv *priv)
 {
+       struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+       /* cable disconnect */
+       if (gpriv->driver &&
+           gpriv->driver->disconnect)
+               gpriv->driver->disconnect(&gpriv->gadget);
+
        return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
 }
 
-int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 {
        struct usbhsg_gpriv *gpriv;
        struct usbhsg_uep *uep;
@@ -876,12 +862,14 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
        /*
         * init gadget
         */
-       device_initialize(&gpriv->gadget.dev);
        dev_set_name(&gpriv->gadget.dev, "gadget");
        gpriv->gadget.dev.parent        = dev;
        gpriv->gadget.name              = "renesas_usbhs_udc";
        gpriv->gadget.ops               = &usbhsg_gadget_ops;
        gpriv->gadget.is_dualspeed      = 1;
+       ret = device_register(&gpriv->gadget.dev);
+       if (ret < 0)
+               goto err_add_udc;
 
        INIT_LIST_HEAD(&gpriv->gadget.ep_list);
 
@@ -912,12 +900,15 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        ret = usb_add_gadget_udc(dev, &gpriv->gadget);
        if (ret)
-               goto err_add_udc;
+               goto err_register;
 
 
        dev_info(dev, "gadget probed\n");
 
        return 0;
+
+err_register:
+       device_unregister(&gpriv->gadget.dev);
 err_add_udc:
        kfree(gpriv->uep);
 
@@ -927,12 +918,14 @@ usbhs_mod_gadget_probe_err_gpriv:
        return ret;
 }
 
-void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
+void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 
        usb_del_gadget_udc(&gpriv->gadget);
 
+       device_unregister(&gpriv->gadget.dev);
+
        usbhsg_controller_unregister(gpriv);
 
        kfree(gpriv->uep);
index 1a7208a50afc15dc42de21b7bcbf6184ac204ff3..7955de5899512ecff50c099ab084a35a36a10f07 100644 (file)
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
 
        u32     port_stat;      /* USB_PORT_STAT_xxx */
 
-       struct completion       *done;
+       struct completion       setup_ack_done;
 
        /* see usbhsh_req_alloc/free */
        struct list_head        ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
 struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
                                        struct usbhsh_device *udev,
                                        struct usb_host_endpoint *ep,
+                                       int dir_in_req,
                                        gfp_t mem_flags)
 {
        struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
        struct usbhs_pipe *pipe, *best_pipe;
        struct device *dev = usbhsh_hcd_to_dev(hcd);
        struct usb_endpoint_descriptor *desc = &ep->desc;
-       int type, i;
+       int type, i, dir_in;
        unsigned int min_usr;
 
+       dir_in_req = !!dir_in_req;
+
        uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
        if (!uep) {
                dev_err(dev, "usbhsh_ep alloc fail\n");
                return NULL;
        }
-       type = usb_endpoint_type(desc);
+
+       if (usb_endpoint_xfer_control(desc)) {
+               best_pipe = usbhsh_hpriv_to_dcp(hpriv);
+               goto usbhsh_endpoint_alloc_find_pipe;
+       }
 
        /*
         * find best pipe for endpoint
         * see
         *      HARDWARE LIMITATION
         */
+       type = usb_endpoint_type(desc);
        min_usr = ~0;
        best_pipe = NULL;
-       usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+       usbhs_for_each_pipe(pipe, priv, i) {
                if (!usbhs_pipe_type_is(pipe, type))
                        continue;
 
+               dir_in = !!usbhs_pipe_is_dir_in(pipe);
+               if (0 != (dir_in - dir_in_req))
+                       continue;
+
                info = usbhsh_pipe_info(pipe);
 
                if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
                kfree(uep);
                return NULL;
        }
-
+usbhsh_endpoint_alloc_find_pipe:
        /*
         * init uep
         */
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
         * see
         *  DCPMAXP/PIPEMAXP
         */
+       usbhs_pipe_sequence_data0(uep->pipe);
        usbhs_pipe_config_update(uep->pipe,
                                 usbhsh_device_number(hpriv, udev),
                                 usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
 
        dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
                usbhsh_device_number(hpriv, udev),
-               usbhs_pipe_name(pipe), uep);
+               usbhs_pipe_name(uep->pipe), uep);
 
        return uep;
 }
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
         *      usbhsh_irq_setup_ack()
         *      usbhsh_irq_setup_err()
         */
-       DECLARE_COMPLETION(done);
-       hpriv->done = &done;
+       init_completion(&hpriv->setup_ack_done);
 
        /* copy original request */
        memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
        /*
         * wait setup packet ACK
         */
-       wait_for_completion(&done);
-       hpriv->done = NULL;
+       wait_for_completion(&hpriv->setup_ack_done);
 
        dev_dbg(dev, "%s done\n", __func__);
 }
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
        struct usbhsh_device *udev, *new_udev = NULL;
        struct usbhs_pipe *pipe;
        struct usbhsh_ep *uep;
+       int is_dir_in = usb_pipein(urb->pipe);
 
        int ret;
 
-       dev_dbg(dev, "%s (%s)\n",
-               __func__, usb_pipein(urb->pipe) ? "in" : "out");
+       dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
 
        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
         */
        uep = usbhsh_ep_to_uep(ep);
        if (!uep) {
-               uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags);
+               uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
+                                           is_dir_in, mem_flags);
                if (!uep)
                        goto usbhsh_urb_enqueue_error_free_device;
        }
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
 
        dev_dbg(dev, "setup packet OK\n");
 
-       if (unlikely(!hpriv->done))
-               dev_err(dev, "setup ack happen without necessary data\n");
-       else
-               complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+       complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
 
        return 0;
 }
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
 
        dev_dbg(dev, "setup packet Err\n");
 
-       if (unlikely(!hpriv->done))
-               dev_err(dev, "setup err happen without necessary data\n");
-       else
-               complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+       complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
 
        return 0;
 }
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
 {
        struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
        struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+       struct usbhs_mod *mod = usbhs_mod_get_current(priv);
        struct device *dev = usbhs_priv_to_dev(priv);
 
+       /*
+        * disable irq callback
+        */
+       mod->irq_attch  = NULL;
+       mod->irq_dtch   = NULL;
+       mod->irq_sack   = NULL;
+       mod->irq_sign   = NULL;
+       usbhs_irq_callback_update(priv, mod);
+
        usb_remove_hcd(hcd);
 
        /* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
        return 0;
 }
 
-int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
+int usbhs_mod_host_probe(struct usbhs_priv *priv)
 {
        struct usbhsh_hpriv *hpriv;
        struct usb_hcd *hcd;
@@ -1251,6 +1267,7 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
                dev_err(dev, "Failed to create hcd\n");
                return -ENOMEM;
        }
+       hcd->has_tt = 1; /* for low/full speed */
 
        pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
        if (!pipe_info) {
@@ -1279,7 +1296,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
        hpriv->mod.stop         = usbhsh_stop;
        hpriv->pipe_info        = pipe_info;
        hpriv->pipe_size        = pipe_size;
-       hpriv->done             = NULL;
        usbhsh_req_list_init(hpriv);
        usbhsh_port_stat_init(hpriv);
 
@@ -1299,7 +1315,7 @@ usbhs_mod_host_probe_err:
        return -ENOMEM;
 }
 
-int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv)
+int usbhs_mod_host_remove(struct usbhs_priv *priv)
 {
        struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
        struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
index 5cdb9d912275193bd7f5a8929742c4199a45c5c4..18e875b92e001d1c290f2b95e498eea77c6b72cb 100644 (file)
@@ -42,7 +42,7 @@ static int debug;
  * Version information
  */
 
-#define DRIVER_VERSION "v0.6"
+#define DRIVER_VERSION "v0.7"
 #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
 #define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
 #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
                goto err_out;
        }
 
-       /* setup termios */
-       if (tty)
-               ark3116_set_termios(tty, port, NULL);
-
        /* remove any data still left: also clears error state */
        ark3116_read_reg(serial, UART_RX, buf);
 
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
        /* enable DMA */
        ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
 
+       /* setup termios */
+       if (tty)
+               ark3116_set_termios(tty, port, NULL);
+
 err_out:
        kfree(buf);
        return result;
index 8fe034d2d3e7b1bf1c4b0471a1bfaff96ab218a7..ff3db5d056a56484fe594039f5817ac7d6ec4024 100644 (file)
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,13 +2105,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
 
        cflag = termios->c_cflag;
 
-       /* FIXME -For this cut I don't care if the line is really changing or
-          not  - so just do the change regardless  - should be able to
-          compare old_termios and tty->termios */
+       if (old_termios->c_cflag == termios->c_cflag
+           && old_termios->c_ispeed == termios->c_ispeed
+           && old_termios->c_ospeed == termios->c_ospeed)
+               goto no_c_cflag_changes;
+
        /* NOTE These routines can get interrupted by
           ftdi_sio_read_bulk_callback  - need to examine what this means -
           don't see any problems yet */
 
+       if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
+           (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+               goto no_data_parity_stop_changes;
+
        /* Set number of data bits, parity, stop bits */
 
        urb_value = 0;
@@ -2151,6 +2158,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
        }
 
        /* Now do the baudrate */
+no_data_parity_stop_changes:
        if ((cflag & CBAUD) == B0) {
                /* Disable flow control */
                if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2186,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
 
        /* Set flow control */
        /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
+no_c_cflag_changes:
        if (cflag & CRTSCTS) {
                dbg("%s Setting to CRTSCTS flow control", __func__);
                if (usb_control_msg(dev,
index 571fa96b49c7749b983c8d4a8f8228f3500f975c..055b64ef0bbad7ad6dd20200860c6874dd120f1e 100644 (file)
 
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID    0xD739
 
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID    0xD780
index 89ae1f65e1b18bc46d8b8ccc0004cd94edb4ee40..6dd64534fad0d88c6239fa9c64279bcc82525112 100644 (file)
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
 #define HUAWEI_PRODUCT_K4511                   0x14CC
 #define HUAWEI_PRODUCT_ETS1220                 0x1803
 #define HUAWEI_PRODUCT_E353                    0x1506
+#define HUAWEI_PRODUCT_E173S                   0x1C05
 
 #define QUANTA_VENDOR_ID                       0x0408
 #define QUANTA_PRODUCT_Q101                    0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_AC8710                     0xfff1
 #define ZTE_PRODUCT_AC2726                     0xfff5
 #define ZTE_PRODUCT_AC8710T                    0xffff
+#define ZTE_PRODUCT_MC2718                     0xffe8
+#define ZTE_PRODUCT_AD3812                     0xffeb
+#define ZTE_PRODUCT_MC2716                     0xffed
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CLU528                    0x260D
 #define YUGA_PRODUCT_CLU526                    0x260F
 
+/* Viettel products */
+#define VIETTEL_VENDOR_ID                      0x2262
+#define VIETTEL_PRODUCT_VT1000                 0x0002
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
        .reserved = BIT(4),
 };
 
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+       .sendsetup = BIT(0) | BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+       .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+       .sendsetup = BIT(1) | BIT(2) | BIT(3),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -640,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -726,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1043,6 +1073,12 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1177,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+       { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 9083d1e616b4b48c30661ecd4261dfff3c127c14..fc2d66f7f4eb0ce3410a066e3877155651398eff 100644 (file)
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
-       { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
        { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
        { }                                     /* Terminating entry */
 };
index 3d10d7f02072d10250f80802b7130fb941fb6793..c38b8c00c06fddd4c71f3508df13f592d2b4480b 100644 (file)
 #define ADLINK_VENDOR_ID               0x0b63
 #define ADLINK_ND6530_PRODUCT_ID       0x6530
 
-/* WinChipHead USB->RS 232 adapter */
-#define WINCHIPHEAD_VENDOR_ID          0x4348
-#define WINCHIPHEAD_USBSER_PRODUCT_ID  0x5523
-
 /* SMART USB Serial Adapter */
 #define SMART_VENDOR_ID        0x0b8c
 #define SMART_PRODUCT_ID       0x2303
index 4dca3ef0668c9963448289c3ab1160633865a6b9..9fbe742343c6cb6d023706bd7d5bd8170191d49e 100644 (file)
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
                result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
        } else {
                void *buf;
-               int offset;
+               int offset = 0;
                u16 PhyBlockAddr;
                u8 PageNum;
-               u32 result;
                u16 len, oldphy, newphy;
 
                buf = kmalloc(blenByte, GFP_KERNEL);
index 93c1a4d86f51785983c5bac899d272808d390aff..82dd834709c78f7627b82a53d5e268fb006744a2 100644 (file)
@@ -59,7 +59,9 @@
 
 void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
 {
-       /* Pad the SCSI command with zeros out to 12 bytes
+       /*
+        * Pad the SCSI command with zeros out to 12 bytes.  If the
+        * command already is 12 bytes or longer, leave it alone.
         *
         * NOTE: This only works because a scsi_cmnd struct field contains
         * a unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
        for (; srb->cmd_len<12; srb->cmd_len++)
                srb->cmnd[srb->cmd_len] = 0;
 
-       /* set command length to 12 bytes */
-       srb->cmd_len = 12;
-
        /* send the command to the transport layer */
        usb_stor_invoke_transport(srb, us);
 }
index 3041a974faf39278ef8fad4033e089f8e64b7b9c..24caba79d722a74fd2dba94f9560a7168e4bb26c 100644 (file)
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
+               "Kingston",
+               "DT 101 G2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG ),
+
 /* Reported by Francesco Foresti <frafore@tiscali.it> */
 UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                "Super Top",
index 55f91d9ab00bd18bd01df877eab846a310595956..29577bf1f559070044ee2c9f50d300d86745f78e 100644 (file)
 /* Clock registers available only on Version 2 */
 #define  LCD_CLK_ENABLE_REG                    0x6c
 #define  LCD_CLK_RESET_REG                     0x70
+#define  LCD_CLK_MAIN_RESET                    BIT(3)
 
 #define LCD_NUM_BUFFERS        2
 
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
 {
        u32 reg;
 
+       /* Bring LCDC out of reset */
+       if (lcd_revision == LCD_VERSION_2)
+               lcdc_write(0, LCD_CLK_RESET_REG);
+
        reg = lcdc_read(LCD_RASTER_CTRL_REG);
        if (!(reg & LCD_RASTER_ENABLE))
                lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
        reg = lcdc_read(LCD_RASTER_CTRL_REG);
        if (reg & LCD_RASTER_ENABLE)
                lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+       if (lcd_revision == LCD_VERSION_2)
+               /* Write 1 to reset LCDC */
+               lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
 }
 
 static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
        lcdc_write(0, LCD_DMA_CTRL_REG);
        lcdc_write(0, LCD_RASTER_CTRL_REG);
 
-       if (lcd_revision == LCD_VERSION_2)
+       if (lcd_revision == LCD_VERSION_2) {
                lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+               /* Write 1 to reset */
+               lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+               lcdc_write(0, LCD_CLK_RESET_REG);
+       }
 }
 
 static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
index 0ccd7adf47bb2db54312287fb5191ac8ed4b2b94..6f61e781f15afa94ae4ebbca32b57407b648a47a 100644 (file)
@@ -19,6 +19,7 @@
  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
index 3532782551cb80dfa6ee1254be24d669c2fb28ef..5c81533eacaa6224c3aed27d0c1e72a60616c3d7 100644 (file)
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
        const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
        unsigned long fclk = 0;
 
-       if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
-               if (width != out_width || height != out_height)
-                       return -EINVAL;
-               else
-                       return 0;
-       }
+       if (width == out_width && height == out_height)
+               return 0;
+
+       if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+               return -EINVAL;
 
        if (out_width < width / maxdownscale ||
                        out_width > width * 8)
index 3262f0f1fa35f395aec7637dc4e61f4fdf5766b1..c56378c555b0907255048e32384c1b5b191bc14f 100644 (file)
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
 unsigned long hdmi_get_pixel_clock(void)
 {
        /* HDMI Pixel Clock in Mhz */
-       return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+       return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
 }
 
 static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
index 69d882cbe7095f0acde27c9e61e12ea3236948d8..c01c1c162726137e19197c112313aa7458a91e0b 100644 (file)
 #define M1200X720_R60_VSP       POSITIVE
 
 /* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP       NEGATIVE
-#define M1200X900_R60_VSP       NEGATIVE
+#define M1200X900_R60_HSP       POSITIVE
+#define M1200X900_R60_VSP       POSITIVE
 
 /* 1280x600@60 Sync Polarity (GTF Mode) */
 #define M1280x600_R60_HSP       NEGATIVE
index 816ed08e7cf3c504f4ce5377b327964a30daa8e9..1a61939b85fce4f3a1269b7915b1e9c3760efe26 100644 (file)
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
 
  config VIRTIO_MMIO
        tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
-       depends on EXPERIMENTAL
+       depends on HAS_IOMEM && EXPERIMENTAL
        select VIRTIO
        select VIRTIO_RING
        ---help---
index acc5e43c373eb25c3d24410a70a39392caf551f4..0269717436af03f5d502be98629927e7591e5cc4 100644 (file)
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
        vring_transport_features(vdev);
 
        for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
-               writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+               writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
                writel(vdev->features[i],
                                vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
        }
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 }
 
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+       struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 
+       return vm_dev->pdev->name;
+}
 
 static struct virtio_config_ops virtio_mmio_config_ops = {
        .get            = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
        .del_vqs        = vm_del_vqs,
        .get_features   = vm_get_features,
        .finalize_features = vm_finalize_features,
+       .bus_name       = vm_bus_name,
 };
 
 
index 3d1bf41e8892b8e2150a51403726ebb52ea9d56c..baabb7937ec2c338fc813fb91cfaac21b0c94484 100644 (file)
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
        iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
 }
 
+/* wait for pending irq handlers */
+static void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       int i;
+
+       if (vp_dev->intx_enabled)
+               synchronize_irq(vp_dev->pci_dev->irq);
+
+       for (i = 0; i < vp_dev->msix_vectors; ++i)
+               synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
 static void vp_reset(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+       /* Flush out the status write, and flush in device writes,
+        * including MSI-X interrupts, if any. */
+       ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 /* the notify function used when creating a virt queue */
@@ -580,6 +598,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                                  false, false);
 }
 
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       return pci_name(vp_dev->pci_dev);
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
@@ -590,6 +615,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
        .del_vqs        = vp_del_vqs,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
+       .bus_name       = vp_bus_name,
 };
 
 static void virtio_pci_release_dev(struct device *_d)
index 6285867a93568c5dd03576d5e5adf1779d7c21d1..79fd606b7cd5cad60f487120db2e3ebf408ddedb 100644 (file)
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called nuc900_wdt.
 
-config ADX_WATCHDOG
-       tristate "Avionic Design Xanthos watchdog"
-       depends on ARCH_PXA_ADX
-       help
-         Say Y here if you want support for the watchdog timer on Avionic
-         Design Xanthos boards.
-
 config TS72XX_WATCHDOG
        tristate "TS-72XX SBC Watchdog"
        depends on MACH_TS72XX
index 55bd5740e91000f1616da7ad0a4adf5eb4c33ac7..fe893e91935b6652e680b3b722ab3bb7b71bf3b3 100644 (file)
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
 obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
 obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
 obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
-obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
 obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
 obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
 
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644 (file)
index af6e6b1..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#define WATCHDOG_NAME "adx-wdt"
-
-/* register offsets */
-#define        ADX_WDT_CONTROL         0x00
-#define        ADX_WDT_CONTROL_ENABLE  (1 << 0)
-#define        ADX_WDT_CONTROL_nRESET  (1 << 1)
-#define        ADX_WDT_TIMEOUT         0x08
-
-static struct platform_device *adx_wdt_dev;
-static unsigned long driver_open;
-
-#define        WDT_STATE_STOP  0
-#define        WDT_STATE_START 1
-
-struct adx_wdt {
-       void __iomem *base;
-       unsigned long timeout;
-       unsigned int state;
-       unsigned int wake;
-       spinlock_t lock;
-};
-
-static const struct watchdog_info adx_wdt_info = {
-       .identity = "Avionic Design Xanthos Watchdog",
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-};
-
-static void adx_wdt_start_locked(struct adx_wdt *wdt)
-{
-       u32 ctrl;
-
-       ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-       ctrl |= ADX_WDT_CONTROL_ENABLE;
-       writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-       wdt->state = WDT_STATE_START;
-}
-
-static void adx_wdt_start(struct adx_wdt *wdt)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&wdt->lock, flags);
-       adx_wdt_start_locked(wdt);
-       spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_stop_locked(struct adx_wdt *wdt)
-{
-       u32 ctrl;
-
-       ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-       ctrl &= ~ADX_WDT_CONTROL_ENABLE;
-       writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-       wdt->state = WDT_STATE_STOP;
-}
-
-static void adx_wdt_stop(struct adx_wdt *wdt)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&wdt->lock, flags);
-       adx_wdt_stop_locked(wdt);
-       spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
-{
-       unsigned long timeout = seconds * 1000;
-       unsigned long flags;
-       unsigned int state;
-
-       spin_lock_irqsave(&wdt->lock, flags);
-       state = wdt->state;
-       adx_wdt_stop_locked(wdt);
-       writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
-
-       if (state == WDT_STATE_START)
-               adx_wdt_start_locked(wdt);
-
-       wdt->timeout = timeout;
-       spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
-{
-       *seconds = wdt->timeout / 1000;
-}
-
-static void adx_wdt_keepalive(struct adx_wdt *wdt)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&wdt->lock, flags);
-       writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
-       spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static int adx_wdt_open(struct inode *inode, struct file *file)
-{
-       struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
-
-       if (test_and_set_bit(0, &driver_open))
-               return -EBUSY;
-
-       file->private_data = wdt;
-       adx_wdt_set_timeout(wdt, 30);
-       adx_wdt_start(wdt);
-
-       return nonseekable_open(inode, file);
-}
-
-static int adx_wdt_release(struct inode *inode, struct file *file)
-{
-       struct adx_wdt *wdt = file->private_data;
-
-       adx_wdt_stop(wdt);
-       clear_bit(0, &driver_open);
-
-       return 0;
-}
-
-static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct adx_wdt *wdt = file->private_data;
-       void __user *argp = (void __user *)arg;
-       unsigned long __user *p = argp;
-       unsigned long seconds = 0;
-       unsigned int options;
-       long ret = -EINVAL;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
-                       return -EFAULT;
-               else
-                       return 0;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
-
-       case WDIOC_KEEPALIVE:
-               adx_wdt_keepalive(wdt);
-               return 0;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(seconds, p))
-                       return -EFAULT;
-
-               adx_wdt_set_timeout(wdt, seconds);
-
-               /* fallthrough */
-       case WDIOC_GETTIMEOUT:
-               adx_wdt_get_timeout(wdt, &seconds);
-               return put_user(seconds, p);
-
-       case WDIOC_SETOPTIONS:
-               if (copy_from_user(&options, argp, sizeof(options)))
-                       return -EFAULT;
-
-               if (options & WDIOS_DISABLECARD) {
-                       adx_wdt_stop(wdt);
-                       ret = 0;
-               }
-
-               if (options & WDIOS_ENABLECARD) {
-                       adx_wdt_start(wdt);
-                       ret = 0;
-               }
-
-               return ret;
-
-       default:
-               break;
-       }
-
-       return -ENOTTY;
-}
-
-static ssize_t adx_wdt_write(struct file *file, const char __user *data,
-               size_t len, loff_t *ppos)
-{
-       struct adx_wdt *wdt = file->private_data;
-
-       if (len)
-               adx_wdt_keepalive(wdt);
-
-       return len;
-}
-
-static const struct file_operations adx_wdt_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .open = adx_wdt_open,
-       .release = adx_wdt_release,
-       .unlocked_ioctl = adx_wdt_ioctl,
-       .write = adx_wdt_write,
-};
-
-static struct miscdevice adx_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &adx_wdt_fops,
-};
-
-static int __devinit adx_wdt_probe(struct platform_device *pdev)
-{
-       struct resource *res;
-       struct adx_wdt *wdt;
-       int ret = 0;
-       u32 ctrl;
-
-       wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
-       if (!wdt) {
-               dev_err(&pdev->dev, "cannot allocate WDT structure\n");
-               return -ENOMEM;
-       }
-
-       spin_lock_init(&wdt->lock);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
-               return -ENXIO;
-       }
-
-       res = devm_request_mem_region(&pdev->dev, res->start,
-                       resource_size(res), res->name);
-       if (!res) {
-               dev_err(&pdev->dev, "cannot request I/O memory region\n");
-               return -ENXIO;
-       }
-
-       wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
-                       resource_size(res));
-       if (!wdt->base) {
-               dev_err(&pdev->dev, "cannot remap I/O memory region\n");
-               return -ENXIO;
-       }
-
-       /* disable watchdog and reboot on timeout */
-       ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-       ctrl &= ~ADX_WDT_CONTROL_ENABLE;
-       ctrl &= ~ADX_WDT_CONTROL_nRESET;
-       writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-
-       platform_set_drvdata(pdev, wdt);
-       adx_wdt_dev = pdev;
-
-       ret = misc_register(&adx_wdt_miscdev);
-       if (ret) {
-               dev_err(&pdev->dev, "cannot register miscdev on minor %d "
-                               "(err=%d)\n", WATCHDOG_MINOR, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int __devexit adx_wdt_remove(struct platform_device *pdev)
-{
-       struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-       misc_deregister(&adx_wdt_miscdev);
-       adx_wdt_stop(wdt);
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static void adx_wdt_shutdown(struct platform_device *pdev)
-{
-       struct adx_wdt *wdt = platform_get_drvdata(pdev);
-       adx_wdt_stop(wdt);
-}
-
-#ifdef CONFIG_PM
-static int adx_wdt_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-       wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
-       adx_wdt_stop(wdt);
-
-       return 0;
-}
-
-static int adx_wdt_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-       if (wdt->wake)
-               adx_wdt_start(wdt);
-
-       return 0;
-}
-
-static const struct dev_pm_ops adx_wdt_pm_ops = {
-       .suspend = adx_wdt_suspend,
-       .resume = adx_wdt_resume,
-};
-
-#  define ADX_WDT_PM_OPS       (&adx_wdt_pm_ops)
-#else
-#  define ADX_WDT_PM_OPS       NULL
-#endif
-
-static struct platform_driver adx_wdt_driver = {
-       .probe = adx_wdt_probe,
-       .remove = __devexit_p(adx_wdt_remove),
-       .shutdown = adx_wdt_shutdown,
-       .driver = {
-               .name = WATCHDOG_NAME,
-               .owner = THIS_MODULE,
-               .pm = ADX_WDT_PM_OPS,
-       },
-};
-
-static int __init adx_wdt_init(void)
-{
-       return platform_driver_register(&adx_wdt_driver);
-}
-
-static void __exit adx_wdt_exit(void)
-{
-       platform_driver_unregister(&adx_wdt_driver);
-}
-
-module_init(adx_wdt_init);
-module_exit(adx_wdt_exit);
-
-MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 03f449a430d253ca997f0e84e721748707fb674f..5b89f7d6cd0ff4bb6d2f305e025b2ad3ebd18c5b 100644 (file)
@@ -76,8 +76,6 @@ static int irq;
 static void __iomem *virtbase;
 static unsigned long coh901327_users;
 static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
 static struct device *parent;
 
 /*
@@ -461,6 +459,10 @@ out:
 }
 
 #ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
 static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
 {
        irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
index 3774c9b8dac9c6868a28c5eda079e7915b7d24cb..8464ea1c36a1080f4c4b4045333a70b963d08b5d 100644 (file)
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
 
        cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
 
+       set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
        asminline_call(&cmn_regs, bios32_entrypoint);
 
        if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
                if ((physical_bios_base + physical_bios_offset)) {
                        cru_rom_addr =
                                ioremap(cru_physical_address, cru_length);
-                       if (cru_rom_addr)
+                       if (cru_rom_addr) {
+                               set_memory_x((unsigned long)cru_rom_addr, cru_length);
                                retval = 0;
+                       }
                }
 
                printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
index ba6ad662635ae97776cf51c16b5fba482cc8a8c8..99796c5d913db2c9f354dbd06b503d8d65580cdf 100644 (file)
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
        "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
 module_param(turn_SMI_watchdog_clear_off, int, 0);
 MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
-       "Turn off SMI clearing watchdog (default=0)");
+       "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
 
 /*
  * Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
                ret = -EIO;
                goto out_unmap;
        }
-       if (turn_SMI_watchdog_clear_off) {
+       if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
                /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
                val32 = inl(SMI_EN);
                val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
index 5de7e4fa5b8a62fcc3f366f760a4f2b7e985cfff..a79e3840782ad3f286f0971b4a3cd7f6d5a1ff0b 100644 (file)
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
 
        dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
                 (wtcon & S3C2410_WTCON_ENABLE) ?  "" : "in",
-                (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
-                (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
+                (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
+                (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
 
        return 0;
 
index cc2cfbe33b30d441a2ef6449d2babe0b1eb43a3d..bfaf9bb1ee0d1ff384a014742f5c8148e96fc040 100644 (file)
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
        return 0;
 }
 
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
        {
                .id     = 0x00141805,
                .mask   = 0x00ffffff,
index 7be38556aed0c5b0854a89704875218f15437b02..e789a47db41f4dc1c618efaba3c8e189851c42bb 100644 (file)
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
                if (wm831x_wdt_cfgs[i].time == timeout)
                        break;
        if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
-               ret = -EINVAL;
+               return -EINVAL;
 
        ret = wm831x_reg_unlock(wm831x);
        if (ret == 0) {
index a767884a6c7a10cfb526aff86de7b0ca1848eb9f..31ab82fda38a264cd045c4b52c117eb375f8e80e 100644 (file)
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
  * alloc_xenballooned_pages - get pages that have been ballooned out
  * @nr_pages: Number of pages to get
  * @pages: pages returned
- * @highmem: highmem or lowmem pages
+ * @highmem: allow highmem pages
  * @return 0 on success, error otherwise
  */
 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
        mutex_lock(&balloon_mutex);
        while (pgno < nr_pages) {
                page = balloon_retrieve(highmem);
-               if (page && PageHighMem(page) == highmem) {
+               if (page && (highmem || !PageHighMem(page))) {
                        pages[pgno++] = page;
                } else {
                        enum bp_state st;
index f6832f46aea4a4861368465321cc8423f997cdfc..e1c4c6e5b469c44449f68e9e841d264eb08dbc48 100644 (file)
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
                /* Grant foreign access to the page. */
                gref->gref_id = gnttab_grant_foreign_access(op->domid,
                        pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-               if (gref->gref_id < 0) {
+               if ((int)gref->gref_id < 0) {
                        rc = gref->gref_id;
                        goto undo;
                }
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
                goto out;
        }
 
-       gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+       gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
        if (!gref_ids) {
                rc = -ENOMEM;
                goto out;
index 39871326afa2ebb5905d2f6afbc922bb08826d0a..afca14d9042e6cd2ae03f238143ac8188aaf35fb 100644 (file)
@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
        if (NULL == add)
                return NULL;
 
-       add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
-       add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
-       add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
-       add->kmap_ops  = kzalloc(sizeof(add->kmap_ops[0])  * count, GFP_KERNEL);
-       add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
+       add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+       add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+       add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+       add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+       add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
index 8e964b91c447b117b7d4d17731201254a79365d5..284798aaf8b1391fd8d4de7e43bff52e5721272d 100644 (file)
@@ -166,7 +166,7 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem(bytes);
+       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        if (!xen_io_tlb_start) {
                m = "Cannot allocate Xen-SWIOTLB buffer!\n";
                goto error;
@@ -179,7 +179,7 @@ retry:
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
-               free_bootmem(__pa(xen_io_tlb_start), bytes);
+               free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                m = "Failed to get contiguous memory for DMA from Xen!\n"\
                    "You either: don't have the permissions, do not have"\
                    " enough free memory under 4GB, or the hypervisor memory"\
index 81c3ce6b8bbeed198a77f22e21f2031131a82f42..1906125eab491bb384293c4424c27b8f0477e6f0 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 {
        struct gnttab_map_grant_ref op = {
-               .flags = GNTMAP_host_map,
+               .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                .ref   = gnt_ref,
                .dom   = dev->otherend_id,
        };
        struct vm_struct *area;
+       pte_t *pte;
 
        *vaddr = NULL;
 
-       area = alloc_vm_area(PAGE_SIZE);
+       area = alloc_vm_area(PAGE_SIZE, &pte);
        if (!area)
                return -ENOMEM;
 
-       op.host_addr = (unsigned long)area->addr;
+       op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
        struct gnttab_unmap_grant_ref op = {
                .host_addr = (unsigned long)vaddr,
        };
+       unsigned int level;
 
        /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
         * method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
        }
 
        op.handle = (grant_handle_t)area->phys_addr;
+       op.host_addr = arbitrary_virt_to_machine(
+               lookup_address((unsigned long)vaddr, &level)).maddr;
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();
index b3b8f2f3ad106aea4b4f33c9f61bf7eb01919563..ede860f921df847e3c94a2abda905949737c4e3d 100644 (file)
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
        return NULL;
 }
 
-static void xs_reset_watches(void)
-{
-       int err;
-
-       err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
-       if (err && err != -EEXIST)
-               printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
 /* Register callback to watch this node. */
 int register_xenbus_watch(struct xenbus_watch *watch)
 {
@@ -906,9 +897,5 @@ int xs_init(void)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       /* shutdown watches for kexec boot */
-       if (xen_hvm_domain())
-               xs_reset_watches();
-
        return 0;
 }
index e24cd8986d8badacc41ce764a15cc99f2524fb8c..ea78c3a17eecd8d4bf38e182fcb4ca1ce0389492 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index 41c93c72224457179bda4ca18798b6f2add617c9..b1fe82cf88cfe0864a2d9603b713f8caacecde4e 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -337,7 +337,7 @@ static void bio_fs_destructor(struct bio *bio)
  *     RETURNS:
  *     Pointer to new bio on success, NULL on failure.
  */
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 
@@ -365,7 +365,7 @@ static void bio_kmalloc_destructor(struct bio *bio)
  *   %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
        struct bio *bio;
 
@@ -696,7 +696,8 @@ static void bio_free_map_data(struct bio_map_data *bmd)
        kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+static struct bio_map_data *bio_alloc_map_data(int nr_segs,
+                                              unsigned int iov_count,
                                               gfp_t gfp_mask)
 {
        struct bio_map_data *bmd;
index 7ec14097fef1f3bbb9b7611a96111c55d0ba9e19..0b394580d8603becf8c5c031745687f6ba9e9aeb 100644 (file)
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
        int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
-       btrfs_start_workers(start->queue, 1);
+       __btrfs_start_workers(start->queue);
        kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-       struct worker_start *start;
-       int ret;
-
-       start = kzalloc(sizeof(*start), GFP_NOFS);
-       if (!start)
-               return -ENOMEM;
-
-       start->work.func = start_new_worker_func;
-       start->queue = queue;
-       ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-       if (ret)
-               kfree(start);
-       return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
        struct btrfs_workers *workers = worker->workers;
+       struct worker_start *start;
        unsigned long flags;
 
        rmb();
        if (!workers->atomic_start_pending)
                return;
 
+       start = kzalloc(sizeof(*start), GFP_NOFS);
+       if (!start)
+               return;
+
+       start->work.func = start_new_worker_func;
+       start->queue = workers;
+
        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
-       start_new_worker(workers);
+       btrfs_queue_worker(workers->atomic_worker_start, &start->work);
        return;
 
 out:
+       kfree(start);
        spin_unlock_irqrestore(&workers->lock, flags);
 }
 
@@ -331,7 +325,7 @@ again:
                        run_ordered_completions(worker->workers, work);
 
                        check_pending_worker_creates(worker);
-
+                       cond_resched();
                }
 
                spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-                                int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
        struct btrfs_worker_thread *worker;
        int ret = 0;
-       int i;
 
-       for (i = 0; i < num_workers; i++) {
-               worker = kzalloc(sizeof(*worker), GFP_NOFS);
-               if (!worker) {
-                       ret = -ENOMEM;
-                       goto fail;
-               }
+       worker = kzalloc(sizeof(*worker), GFP_NOFS);
+       if (!worker) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
-               INIT_LIST_HEAD(&worker->pending);
-               INIT_LIST_HEAD(&worker->prio_pending);
-               INIT_LIST_HEAD(&worker->worker_list);
-               spin_lock_init(&worker->lock);
-
-               atomic_set(&worker->num_pending, 0);
-               atomic_set(&worker->refs, 1);
-               worker->workers = workers;
-               worker->task = kthread_run(worker_loop, worker,
-                                          "btrfs-%s-%d", workers->name,
-                                          workers->num_workers + i);
-               if (IS_ERR(worker->task)) {
-                       ret = PTR_ERR(worker->task);
-                       kfree(worker);
-                       goto fail;
-               }
-               spin_lock_irq(&workers->lock);
-               list_add_tail(&worker->worker_list, &workers->idle_list);
-               worker->idle = 1;
-               workers->num_workers++;
-               workers->num_workers_starting--;
-               WARN_ON(workers->num_workers_starting < 0);
-               spin_unlock_irq(&workers->lock);
+       INIT_LIST_HEAD(&worker->pending);
+       INIT_LIST_HEAD(&worker->prio_pending);
+       INIT_LIST_HEAD(&worker->worker_list);
+       spin_lock_init(&worker->lock);
+
+       atomic_set(&worker->num_pending, 0);
+       atomic_set(&worker->refs, 1);
+       worker->workers = workers;
+       worker->task = kthread_run(worker_loop, worker,
+                                  "btrfs-%s-%d", workers->name,
+                                  workers->num_workers + 1);
+       if (IS_ERR(worker->task)) {
+               ret = PTR_ERR(worker->task);
+               kfree(worker);
+               goto fail;
        }
+       spin_lock_irq(&workers->lock);
+       list_add_tail(&worker->worker_list, &workers->idle_list);
+       worker->idle = 1;
+       workers->num_workers++;
+       workers->num_workers_starting--;
+       WARN_ON(workers->num_workers_starting < 0);
+       spin_unlock_irq(&workers->lock);
+
        return 0;
 fail:
-       btrfs_stop_workers(workers);
+       spin_lock_irq(&workers->lock);
+       workers->num_workers_starting--;
+       spin_unlock_irq(&workers->lock);
        return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
        spin_lock_irq(&workers->lock);
-       workers->num_workers_starting += num_workers;
+       workers->num_workers_starting++;
        spin_unlock_irq(&workers->lock);
-       return __btrfs_start_workers(workers, num_workers);
+       return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;
+       int ret;
 
-again:
        spin_lock_irqsave(&workers->lock, flags);
+again:
        worker = next_worker(workers);
 
        if (!worker) {
@@ -584,7 +578,10 @@ again:
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
-                       __btrfs_start_workers(workers, 1);
+                       ret = __btrfs_start_workers(workers);
+                       spin_lock_irqsave(&workers->lock, flags);
+                       if (ret)
+                               goto fallback;
                        goto again;
                }
        }
@@ -665,7 +662,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
        struct btrfs_worker_thread *worker;
        unsigned long flags;
@@ -673,7 +670,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-               goto out;
+               return;
 
        worker = find_worker(workers);
        if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-       return 0;
 }
index 5077746cf85e049e87bcd8ded49b592ecc271605..f34cc31fa3c9a8d1c55f7181ca05b10a11f8ba64 100644 (file)
@@ -109,8 +109,8 @@ struct btrfs_workers {
        char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_starter);
index 8855aad3929c337cb1891cd63c702ff1b69e9322..22c64fff1bd524b213ce8b13a233f861680d8424 100644 (file)
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
                return PTR_ERR(fspath);
 
        if (fspath > fspath_min) {
-               ipath->fspath->val[i] = (u64)fspath;
+               ipath->fspath->val[i] = (u64)(unsigned long)fspath;
                ++ipath->fspath->elem_cnt;
                ipath->fspath->bytes_left = fspath - fspath_min;
        } else {
index 0fe615e4ea387582acc06f60cac81366f23fe069..dede441bdeee2678225187bece170710abe1a9b4 100644 (file)
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
 {
+       /* ensure we can see the force_cow */
+       smp_rmb();
+
+       /*
+        * We do not need to cow a block if
+        * 1) this block is not created or changed in this transaction;
+        * 2) this block does not belong to TREE_RELOC tree;
+        * 3) the root is not forced COW.
+        *
+        * What is forced COW:
+        *    when we create a snapshot while committing the transaction,
+        *    after we've finished copying the src root, we must COW the shared
+        *    block to ensure the metadata consistency.
+        */
        if (btrfs_header_generation(buf) == trans->transid &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
            !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
-             btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+             btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
+           !root->force_cow)
                return 0;
        return 1;
 }
index b9ba59ff9292559330745b8afb41eefacfbf1615..67385033323d6e49817398a1df9b1596df07e839 100644 (file)
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
 enum btrfs_caching_type {
        BTRFS_CACHE_NO          = 0,
        BTRFS_CACHE_STARTED     = 1,
-       BTRFS_CACHE_FINISHED    = 2,
+       BTRFS_CACHE_FAST        = 2,
+       BTRFS_CACHE_FINISHED    = 3,
 };
 
 enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
         * for stat.  It may be used for more later
         */
        dev_t anon_dev;
+
+       int force_cow;
 };
 
 struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv,
                          u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes);
@@ -2686,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
index 5b163572e0ca7ddd6d5f8ef6ff6783dbd5871c4d..9c1eccc2c503e5eec8bd20d3dfc057a417eef89a 100644 (file)
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we're accounted for.
         */
-       if (!trans->bytes_reserved &&
-           src_rsv != &root->fs_info->delalloc_block_rsv) {
+       if (!src_rsv || (!trans->bytes_reserved &&
+           src_rsv != &root->fs_info->delalloc_block_rsv)) {
                ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
index 62afe5c5694e9f4681931daee508334835bad243..f44b3928dc2dc94cb62cefe72f1063f282dc09c2 100644 (file)
@@ -620,7 +620,7 @@ out:
 
 static int btree_io_failed_hook(struct bio *failed_bio,
                         struct page *page, u64 start, u64 end,
-                        u64 mirror_num, struct extent_state *state)
+                        int mirror_num, struct extent_state *state)
 {
        struct extent_io_tree *tree;
        unsigned long len;
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->endio_meta_write_workers.idle_thresh = 2;
        fs_info->readahead_workers.idle_thresh = 2;
 
-       btrfs_start_workers(&fs_info->workers, 1);
-       btrfs_start_workers(&fs_info->generic_worker, 1);
-       btrfs_start_workers(&fs_info->submit_workers, 1);
-       btrfs_start_workers(&fs_info->delalloc_workers, 1);
-       btrfs_start_workers(&fs_info->fixup_workers, 1);
-       btrfs_start_workers(&fs_info->endio_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-       btrfs_start_workers(&fs_info->delayed_workers, 1);
-       btrfs_start_workers(&fs_info->caching_workers, 1);
-       btrfs_start_workers(&fs_info->readahead_workers, 1);
+       /*
+        * btrfs_start_workers can really only fail because of ENOMEM so just
+        * return -ENOMEM if any of these fail.
+        */
+       ret = btrfs_start_workers(&fs_info->workers);
+       ret |= btrfs_start_workers(&fs_info->generic_worker);
+       ret |= btrfs_start_workers(&fs_info->submit_workers);
+       ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+       ret |= btrfs_start_workers(&fs_info->fixup_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+       ret |= btrfs_start_workers(&fs_info->delayed_workers);
+       ret |= btrfs_start_workers(&fs_info->caching_workers);
+       ret |= btrfs_start_workers(&fs_info->readahead_workers);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_sb_buffer;
+       }
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2573,22 +2581,10 @@ static int write_dev_supers(struct btrfs_device *device,
        int errors = 0;
        u32 crc;
        u64 bytenr;
-       int last_barrier = 0;
 
        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
 
-       /* make sure only the last submit_bh does a barrier */
-       if (do_barriers) {
-               for (i = 0; i < max_mirrors; i++) {
-                       bytenr = btrfs_sb_offset(i);
-                       if (bytenr + BTRFS_SUPER_INFO_SIZE >=
-                           device->total_bytes)
-                               break;
-                       last_barrier = i;
-               }
-       }
-
        for (i = 0; i < max_mirrors; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2634,17 +2630,136 @@ static int write_dev_supers(struct btrfs_device *device,
                        bh->b_end_io = btrfs_end_buffer_write_sync;
                }
 
-               if (i == last_barrier && do_barriers)
-                       ret = submit_bh(WRITE_FLUSH_FUA, bh);
-               else
-                       ret = submit_bh(WRITE_SYNC, bh);
-
+               /*
+                * we fua the first super.  The others we allow
+                * to go down lazy.
+                */
+               ret = submit_bh(WRITE_FUA, bh);
                if (ret)
                        errors++;
        }
        return errors < i ? 0 : -1;
 }
 
+/*
+ * endio for the write_dev_flush, this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       }
+       if (bio->bi_private)
+               complete(bio->bi_private);
+       bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
+ * sent down.  With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as not-barrier
+ * capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+       struct bio *bio;
+       int ret = 0;
+
+       if (device->nobarriers)
+               return 0;
+
+       if (wait) {
+               bio = device->flush_bio;
+               if (!bio)
+                       return 0;
+
+               wait_for_completion(&device->flush_wait);
+
+               if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+                       printk("btrfs: disabling barriers on dev %s\n",
+                              device->name);
+                       device->nobarriers = 1;
+               }
+               if (!bio_flagged(bio, BIO_UPTODATE)) {
+                       ret = -EIO;
+               }
+
+               /* drop the reference from the wait == 0 run */
+               bio_put(bio);
+               device->flush_bio = NULL;
+
+               return ret;
+       }
+
+       /*
+        * one reference for us, and we leave it for the
+        * caller
+        */
+       device->flush_bio = NULL;;
+       bio = bio_alloc(GFP_NOFS, 0);
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_end_io = btrfs_end_empty_barrier;
+       bio->bi_bdev = device->bdev;
+       init_completion(&device->flush_wait);
+       bio->bi_private = &device->flush_wait;
+       device->flush_bio = bio;
+
+       bio_get(bio);
+       submit_bio(WRITE_FLUSH, bio);
+
+       return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+       struct list_head *head;
+       struct btrfs_device *dev;
+       int errors = 0;
+       int ret;
+
+       /* send down all the barriers */
+       head = &info->fs_devices->devices;
+       list_for_each_entry_rcu(dev, head, dev_list) {
+               if (!dev->bdev) {
+                       errors++;
+                       continue;
+               }
+               if (!dev->in_fs_metadata || !dev->writeable)
+                       continue;
+
+               ret = write_dev_flush(dev, 0);
+               if (ret)
+                       errors++;
+       }
+
+       /* wait for all the barriers */
+       list_for_each_entry_rcu(dev, head, dev_list) {
+               if (!dev->bdev) {
+                       errors++;
+                       continue;
+               }
+               if (!dev->in_fs_metadata || !dev->writeable)
+                       continue;
+
+               ret = write_dev_flush(dev, 1);
+               if (ret)
+                       errors++;
+       }
+       if (errors)
+               return -EIO;
+       return 0;
+}
+
 int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
        struct list_head *head;
@@ -2666,6 +2781,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;
+
+       if (do_barriers)
+               barrier_all_devices(root->fs_info);
+
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        total_errors++;
index b232150b5b6b7500b337337ad07bd03e66f3e407..f5fbe576d2baf48519a01bd449344b49edffa070 100644 (file)
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_root *root,
                             int load_cache_only)
 {
+       DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;
 
-       smp_mb();
-       if (cache->cached != BTRFS_CACHE_NO)
+       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+       BUG_ON(!caching_ctl);
+
+       INIT_LIST_HEAD(&caching_ctl->list);
+       mutex_init(&caching_ctl->mutex);
+       init_waitqueue_head(&caching_ctl->wait);
+       caching_ctl->block_group = cache;
+       caching_ctl->progress = cache->key.objectid;
+       atomic_set(&caching_ctl->count, 1);
+       caching_ctl->work.func = caching_thread;
+
+       spin_lock(&cache->lock);
+       /*
+        * This should be a rare occasion, but this could happen I think in the
+        * case where one thread starts to load the space cache info, and then
+        * some other thread starts a transaction commit which tries to do an
+        * allocation while the other thread is still loading the space cache
+        * info.  The previous loop should have kept us from choosing this block
+        * group, but if we've moved to the state where we will wait on caching
+        * block groups we need to first check if we're doing a fast load here,
+        * so we can wait for it to finish, otherwise we could end up allocating
+        * from a block group whose cache gets evicted for one reason or
+        * another.
+        */
+       while (cache->cached == BTRFS_CACHE_FAST) {
+               struct btrfs_caching_control *ctl;
+
+               ctl = cache->caching_ctl;
+               atomic_inc(&ctl->count);
+               prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&cache->lock);
+
+               schedule();
+
+               finish_wait(&ctl->wait, &wait);
+               put_caching_control(ctl);
+               spin_lock(&cache->lock);
+       }
+
+       if (cache->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&cache->lock);
+               kfree(caching_ctl);
                return 0;
+       }
+       WARN_ON(cache->caching_ctl);
+       cache->caching_ctl = caching_ctl;
+       cache->cached = BTRFS_CACHE_FAST;
+       spin_unlock(&cache->lock);
 
        /*
         * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
-               spin_lock(&cache->lock);
-               if (cache->cached != BTRFS_CACHE_NO) {
-                       spin_unlock(&cache->lock);
-                       return 0;
-               }
-               cache->cached = BTRFS_CACHE_STARTED;
-               spin_unlock(&cache->lock);
-
                ret = load_free_space_cache(fs_info, cache);
 
                spin_lock(&cache->lock);
                if (ret == 1) {
+                       cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
-                       cache->cached = BTRFS_CACHE_NO;
+                       if (load_cache_only) {
+                               cache->caching_ctl = NULL;
+                               cache->cached = BTRFS_CACHE_NO;
+                       } else {
+                               cache->cached = BTRFS_CACHE_STARTED;
+                       }
                }
                spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
                if (ret == 1) {
+                       put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
+       } else {
+               /*
+                * We are not going to do the fast caching, set cached to the
+                * appropriate value and wakeup any waiters.
+                */
+               spin_lock(&cache->lock);
+               if (load_cache_only) {
+                       cache->caching_ctl = NULL;
+                       cache->cached = BTRFS_CACHE_NO;
+               } else {
+                       cache->cached = BTRFS_CACHE_STARTED;
+               }
+               spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
        }
 
-       if (load_cache_only)
-               return 0;
-
-       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-       BUG_ON(!caching_ctl);
-
-       INIT_LIST_HEAD(&caching_ctl->list);
-       mutex_init(&caching_ctl->mutex);
-       init_waitqueue_head(&caching_ctl->wait);
-       caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->key.objectid;
-       /* one for caching kthread, one for caching block group list */
-       atomic_set(&caching_ctl->count, 2);
-       caching_ctl->work.func = caching_thread;
-
-       spin_lock(&cache->lock);
-       if (cache->cached != BTRFS_CACHE_NO) {
-               spin_unlock(&cache->lock);
-               kfree(caching_ctl);
+       if (load_cache_only) {
+               put_caching_control(caching_ctl);
                return 0;
        }
-       cache->caching_ctl = caching_ctl;
-       cache->cached = BTRFS_CACHE_STARTED;
-       spin_unlock(&cache->lock);
 
        down_write(&fs_info->extent_commit_sem);
+       atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);
 
@@ -2781,7 +2822,7 @@ out_free:
        btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
-       if (!ret)
+       if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-                         struct btrfs_block_rsv *block_rsv,
-                         u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+                                          struct btrfs_block_rsv *block_rsv,
+                                          u64 min_reserved, int flush)
 {
        u64 num_bytes = 0;
        int ret = -ENOSPC;
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
        if (!ret)
                return 0;
 
-       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 0);
                return 0;
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
        return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+                          struct btrfs_block_rsv *block_rsv,
+                          u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes)
@@ -4149,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
+       u64 csum_bytes;
        unsigned nr_extents = 0;
+       int extra_reserve = 0;
        int flush = 1;
        int ret;
 
+       /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
+       else
+               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
@@ -4165,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        BTRFS_I(inode)->outstanding_extents++;
 
        if (BTRFS_I(inode)->outstanding_extents >
-           BTRFS_I(inode)->reserved_extents) {
+           BTRFS_I(inode)->reserved_extents)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
-               BTRFS_I(inode)->reserved_extents += nr_extents;
-       }
 
        /*
         * Add an item to reserve for updating the inode when we complete the
@@ -4177,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         */
        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
                nr_extents++;
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               extra_reserve = 1;
        }
 
        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+       csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4191,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
                spin_lock(&BTRFS_I(inode)->lock);
                dropped = drop_outstanding_extent(inode);
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-               spin_unlock(&BTRFS_I(inode)->lock);
-               to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
                /*
-                * Somebody could have come in and twiddled with the
-                * reservation, so if we have to free more than we would have
-                * reserved from this reservation go ahead and release those
-                * bytes.
+                * If the inodes csum_bytes is the same as the original
+                * csum_bytes then we know we haven't raced with any free()ers
+                * so we can just reduce our inodes csum bytes and carry on.
+                * Otherwise we have to do the normal free thing to account for
+                * the case that the free side didn't free up its reserve
+                * because of this outstanding reservation.
                 */
-               to_free -= to_reserve;
+               if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+                       calc_csum_metadata_size(inode, num_bytes, 0);
+               else
+                       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               if (dropped)
+                       to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
                if (to_free)
                        btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
 
+       spin_lock(&BTRFS_I(inode)->lock);
+       if (extra_reserve) {
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               nr_extents--;
+       }
+       BTRFS_I(inode)->reserved_extents += nr_extents;
+       spin_unlock(&BTRFS_I(inode)->lock);
+
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
@@ -5052,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
+       struct btrfs_block_group_cache *used_block_group;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
-       int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5118,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
+               used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
@@ -5155,6 +5228,7 @@ search:
                u64 offset;
                int cached;
 
+               used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;
 
@@ -5178,13 +5252,15 @@ search:
                }
 
 have_block_group:
-               if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached)) {
                        u64 free_percent;
 
+                       found_uncached_bg = true;
                        ret = cache_block_group(block_group, trans,
                                                orig_root, 1);
                        if (block_group->cached == BTRFS_CACHE_FINISHED)
-                               goto have_block_group;
+                               goto alloc;
 
                        free_percent = btrfs_block_group_used(&block_group->item);
                        free_percent *= 100;
@@ -5206,7 +5282,6 @@ have_block_group:
                                                        orig_root, 0);
                                BUG_ON(ret);
                        }
-                       found_uncached_bg = true;
 
                        /*
                         * If loop is set for cached only, try the next block
@@ -5216,94 +5291,80 @@ have_block_group:
                                goto loop;
                }
 
-               cached = block_group_cache_done(block_group);
-               if (unlikely(!cached))
-                       found_uncached_bg = true;
-
+alloc:
                if (unlikely(block_group->ro))
                        goto loop;
 
                spin_lock(&block_group->free_space_ctl->tree_lock);
                if (cached &&
                    block_group->free_space_ctl->free_space <
-                   num_bytes + empty_size) {
+                   num_bytes + empty_cluster + empty_size) {
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                /*
-                * Ok we want to try and use the cluster allocator, so lets look
-                * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                * have tried the cluster allocator plenty of times at this
-                * point and not have found anything, so we are likely way too
-                * fragmented for the clustering stuff to find anything, so lets
-                * just skip it and let the allocator find whatever block it can
-                * find
+                * Ok we want to try and use the cluster allocator, so
+                * lets look there
                 */
-               if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+               if (last_ptr) {
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (last_ptr->block_group &&
-                           (last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))) {
-                               offset = 0;
+                       used_block_group = last_ptr->block_group;
+                       if (used_block_group != block_group &&
+                           (!used_block_group ||
+                            used_block_group->ro ||
+                            !block_group_bits(used_block_group, data))) {
+                               used_block_group = block_group;
                                goto refill_cluster;
                        }
 
-                       offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                num_bytes, search_start);
+                       if (used_block_group != block_group)
+                               btrfs_get_block_group(used_block_group);
+
+                       offset = btrfs_alloc_from_cluster(used_block_group,
+                         last_ptr, num_bytes, used_block_group->key.objectid);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                goto checks;
                        }
 
-                       spin_lock(&last_ptr->lock);
-                       /*
-                        * whoops, this cluster doesn't actually point to
-                        * this block group.  Get a ref on the block
-                        * group is does point to and try again
-                        */
-                       if (!last_ptr_loop && last_ptr->block_group &&
-                           last_ptr->block_group != block_group &&
-                           index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                               btrfs_put_block_group(block_group);
-                               block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
-                               spin_unlock(&last_ptr->lock);
-                               spin_unlock(&last_ptr->refill_lock);
-
-                               last_ptr_loop = 1;
-                               search_start = block_group->key.objectid;
-                               /*
-                                * we know this block group is properly
-                                * in the list because
-                                * btrfs_remove_block_group, drops the
-                                * cluster before it removes the block
-                                * group from the list
-                                */
-                               goto have_block_group;
+                       WARN_ON(last_ptr->block_group != used_block_group);
+                       if (used_block_group != block_group) {
+                               btrfs_put_block_group(used_block_group);
+                               used_block_group = block_group;
                        }
-                       spin_unlock(&last_ptr->lock);
 refill_cluster:
+                       BUG_ON(used_block_group != block_group);
+                       /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                        * set up a new clusters, so lets just skip it
+                        * and let the allocator find whatever block
+                        * it can find.  If we reach this point, we
+                        * will have tried the cluster allocator
+                        * plenty of times and not have found
+                        * anything, so we are likely way too
+                        * fragmented for the clustering stuff to find
+                        * anything.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                       last_ptr_loop = 0;
-
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
-                                              offset, num_bytes,
+                                              search_start, num_bytes,
                                               empty_cluster + empty_size);
                        if (ret == 0) {
                                /*
@@ -5339,6 +5400,7 @@ refill_cluster:
                        goto loop;
                }
 
+unclustered_alloc:
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5365,14 +5427,14 @@ checks:
                search_start = stripe_align(root, offset);
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
                /* move on to the next group */
                if (search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                   used_block_group->key.objectid + used_block_group->key.offset) {
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5380,14 +5442,14 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+               ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5396,15 +5458,19 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);
index 1f87c4d0e7a072c6361fb218b32f024b111bb0a8..49f3c9dc09f4c81902299fd81c62da1ed8423250 100644 (file)
@@ -935,8 +935,10 @@ again:
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                        this_end = last_start - 1;
 
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
@@ -2285,16 +2293,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                clean_io_failure(start, page);
                }
                if (!uptodate) {
-                       u64 failed_mirror;
-                       failed_mirror = (u64)bio->bi_bdev;
-                       if (tree->ops && tree->ops->readpage_io_failed_hook)
-                               ret = tree->ops->readpage_io_failed_hook(
-                                               bio, page, start, end,
-                                               failed_mirror, state);
-                       else
-                               ret = bio_readpage_error(bio, page, start, end,
-                                                        failed_mirror, NULL);
+                       int failed_mirror;
+                       failed_mirror = (int)(unsigned long)bio->bi_bdev;
+                       /*
+                        * The generic bio_readpage_error handles errors the
+                        * following way: If possible, new read requests are
+                        * created and submitted and will end up in
+                        * end_bio_extent_readpage as well (if we're lucky, not
+                        * in the !uptodate case). In that case it returns 0 and
+                        * we just go on with the next page in our bio. If it
+                        * can't handle the error it will return -EIO and we
+                        * remain responsible for that page.
+                        */
+                       ret = bio_readpage_error(bio, page, start, end,
+                                                       failed_mirror, NULL);
                        if (ret == 0) {
+error_handled:
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                uncache_state(&cached);
                                continue;
                        }
+                       if (tree->ops && tree->ops->readpage_io_failed_hook) {
+                               ret = tree->ops->readpage_io_failed_hook(
+                                                       bio, page, start, end,
+                                                       failed_mirror, state);
+                               if (ret == 0)
+                                       goto error_handled;
+                       }
                }
 
                if (uptodate) {
@@ -3366,6 +3387,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -ENOMEM;
        path->leave_spinning = 1;
 
+       start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+       len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+
        /*
         * lookup the last file extent.  We're not using i_size here
         * because there might be preallocation past i_size
@@ -3413,7 +3437,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
                         &cached_state, GFP_NOFS);
 
-       em = get_extent_skip_holes(inode, off, last_for_get_extent,
+       em = get_extent_skip_holes(inode, start, last_for_get_extent,
                                   get_extent);
        if (!em)
                goto out;
index feb9be0e23bcca09a77497d08c74a3c8864d2a8c..7604c30013227fd823b1523503f8faaeccf283c4 100644 (file)
@@ -70,7 +70,7 @@ struct extent_io_ops {
                              unsigned long bio_flags);
        int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
        int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-                                      u64 start, u64 end, u64 failed_mirror,
+                                      u64 start, u64 end, int failed_mirror,
                                       struct extent_state *state);
        int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
                                        u64 start, u64 end,
index dafdfa059bf66a489bd3d858990b9025fd50a72f..97fbe939c050dc7d523baeaed960741c053cbf0d 100644 (file)
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
+       nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+       nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       file_update_time(file);
+       err = btrfs_update_time(file);
+       if (err) {
+               mutex_unlock(&inode->i_mutex);
+               goto out;
+       }
        BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
index 181760f9d2abb7aa0902a65cc93effb3f3d1b41d..ec23d43d0c357870a14bf6e79a0ce5baeb93a43d 100644 (file)
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                }
        }
 
+       for (i = 0; i < io_ctl->num_pages; i++) {
+               clear_page_dirty_for_io(io_ctl->pages[i]);
+               set_page_extent_mapped(io_ctl->pages[i]);
+       }
+
        return 0;
 }
 
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
        info->offset = offset_to_bitmap(ctl, offset);
        info->bytes = 0;
+       INIT_LIST_HEAD(&info->list);
        link_free_space(ctl, info);
        ctl->total_bitmaps++;
 
@@ -1844,7 +1850,13 @@ again:
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
-                       WARN_ON(1);
+                       /* the tree logging code might be calling us before we
+                        * have fully loaded the free space rbtree for this
+                        * block group.  So it is possible the entry won't
+                        * be in the rbtree yet at all.  The caching code
+                        * will make sure not to put it in the rbtree if
+                        * the logging code has pinned it.
+                        */
                        goto out_lock;
                }
        }
@@ -2308,6 +2320,7 @@ again:
 
        if (!found) {
                start = i;
+               cluster->max_size = 0;
                found = true;
        }
 
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
-       struct rb_node *node;
        int ret = -ENOSPC;
+       u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
        if (ctl->total_bitmaps == 0)
                return -ENOSPC;
 
        /*
-        * First check our cached list of bitmaps and see if there is an entry
-        * here that will work.
+        * The bitmap that covers offset won't be in the list unless offset
+        * is just its start offset.
         */
+       entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+       if (entry->offset != bitmap_offset) {
+               entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+               if (entry && list_empty(&entry->list))
+                       list_add(&entry->list, bitmaps);
+       }
+
        list_for_each_entry(entry, bitmaps, list) {
                if (entry->bytes < min_bytes)
                        continue;
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
        }
 
        /*
-        * If we do have entries on our list and we are here then we didn't find
-        * anything, so go ahead and get the next entry after the last entry in
-        * this list and start the search from there.
+        * The bitmaps list has all the bitmaps that record free space
+        * starting after offset, so no more search is required.
         */
-       if (!list_empty(bitmaps)) {
-               entry = list_entry(bitmaps->prev, struct btrfs_free_space,
-                                  list);
-               node = rb_next(&entry->offset_index);
-               if (!node)
-                       return -ENOSPC;
-               entry = rb_entry(node, struct btrfs_free_space, offset_index);
-               goto search;
-       }
-
-       entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
-       if (!entry)
-               return -ENOSPC;
-
-search:
-       node = &entry->offset_index;
-       do {
-               entry = rb_entry(node, struct btrfs_free_space, offset_index);
-               node = rb_next(&entry->offset_index);
-               if (!entry->bitmap)
-                       continue;
-               if (entry->bytes < min_bytes)
-                       continue;
-               ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-                                          bytes, min_bytes);
-       } while (ret && node);
-
-       return ret;
+       return -ENOSPC;
 }
 
 /*
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             u64 offset, u64 bytes, u64 empty_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-       struct list_head bitmaps;
        struct btrfs_free_space *entry, *tmp;
+       LIST_HEAD(bitmaps);
        u64 min_bytes;
        int ret;
 
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                goto out;
        }
 
-       INIT_LIST_HEAD(&bitmaps);
        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
                                      bytes, min_bytes);
        if (ret)
index 116ab67a06dfcae9929669392f7267ed925c806b..fd1a06df5bc637c5dad0b0b7e5ce1ce88d5c1616 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/mount.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -EEXIST);
        }
 
        /* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                if (ret && ret != -ESTALE)
                        goto out;
 
+               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+                       struct btrfs_root *dead_root;
+                       struct btrfs_fs_info *fs_info = root->fs_info;
+                       int is_dead_root = 0;
+
+                       /*
+                        * this is an orphan in the tree root. Currently these
+                        * could come from 2 sources:
+                        *  a) a snapshot deletion in progress
+                        *  b) a free space cache inode
+                        * We need to distinguish those two, as the snapshot
+                        * orphan must not get deleted.
+                        * find_dead_roots already ran before us, so if this
+                        * is a snapshot deletion, we should find the root
+                        * in the dead_roots list
+                        */
+                       spin_lock(&fs_info->trans_lock);
+                       list_for_each_entry(dead_root, &fs_info->dead_roots,
+                                           root_list) {
+                               if (dead_root->root_key.objectid ==
+                                   found_key.objectid) {
+                                       is_dead_root = 1;
+                                       break;
+                               }
+                       }
+                       spin_unlock(&fs_info->trans_lock);
+                       if (is_dead_root) {
+                               /* prevent this orphan from being found again */
+                               key.offset = found_key.objectid - 1;
+                               continue;
+                       }
+               }
                /*
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+                       /*
+                        * Need to hold the imutex for reservation purposes, not
+                        * a huge deal here but I have a WARN_ON in
+                        * btrfs_delalloc_reserve_space to catch offenders.
+                        */
+                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
+                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
 
-                       trans = btrfs_start_transaction(root, 2);
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
 
+                       btrfs_update_inode(trans, root, inode);
                        btrfs_end_transaction(trans, root);
                }
                free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
        int ret;
 
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                return 0;
 
        if (newsize > oldsize) {
-               i_size_write(inode, newsize);
-               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
-               if (ret) {
-                       btrfs_setsize(inode, oldsize);
+               if (ret)
                        return ret;
-               }
 
-               mark_inode_dirty(inode);
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               i_size_write(inode, newsize);
+               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               ret = btrfs_update_inode(trans, root, inode);
+               btrfs_end_transaction_throttle(trans, root);
        } else {
 
                /*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
-               mark_inode_dirty(inode);
+               err = btrfs_dirty_inode(inode);
 
-               if (attr->ia_valid & ATTR_MODE)
+               if (!err && attr->ia_valid & ATTR_MODE)
                        err = btrfs_acl_chmod(inode);
        }
 
@@ -3490,7 +3538,7 @@ void btrfs_evict_inode(struct inode *inode)
         * doing the truncate.
         */
        while (1) {
-               ret = btrfs_block_rsv_refill(root, rsv, min_size);
+               ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
 
                /*
                 * Try and steal from the global reserve since we will
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
 
        if (BTRFS_I(inode)->dummy_inode)
-               return;
+               return 0;
 
        trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
                /* whoops, lets try again with the full transaction */
                btrfs_end_transaction(trans, root);
                trans = btrfs_start_transaction(root, 1);
-               if (IS_ERR(trans)) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %ld\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      PTR_ERR(trans));
-                       return;
-               }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
 
                ret = btrfs_update_inode(trans, root, inode);
-               if (ret) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %d\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      ret);
-               }
        }
        btrfs_end_transaction(trans, root);
        if (BTRFS_I(inode)->delayed_node)
                btrfs_balance_delayed_items(root);
+
+       return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file write and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct timespec now;
+       int ret;
+       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+       /* First try to exhaust all avenues to not sync */
+       if (IS_NOCMTIME(inode))
+               return 0;
+
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_mtime, &now))
+               sync_it = S_MTIME;
+
+       if (!timespec_equal(&inode->i_ctime, &now))
+               sync_it |= S_CTIME;
+
+       if (IS_I_VERSION(inode))
+               sync_it |= S_VERSION;
+
+       if (!sync_it)
+               return 0;
+
+       /* Finally allowed to write? Takes lock. */
+       if (mnt_want_write_file(file))
+               return 0;
+
+       /* Only change inode inside the lock region */
+       if (sync_it & S_VERSION)
+               inode_inc_iversion(inode);
+       if (sync_it & S_CTIME)
+               inode->i_ctime = now;
+       if (sync_it & S_MTIME)
+               inode->i_mtime = now;
+       ret = btrfs_dirty_inode(inode);
+       if (!ret)
+               mark_inode_dirty_sync(inode);
+       mnt_drop_write(file->f_path.mnt);
+       return ret;
 }
 
 /*
@@ -4504,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
        int err = btrfs_add_link(trans, dir, inode,
                                 dentry->d_name.name, dentry->d_name.len,
                                 backref, index);
-       if (!err) {
-               d_instantiate(dentry, inode);
-               return 0;
-       }
        if (err > 0)
                err = -EEXIST;
        return err;
@@ -4555,13 +4637,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+
+       inode->i_op = &btrfs_special_inode_operations;
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
-               inode->i_op = &btrfs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4613,15 +4703,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4679,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *parent = dentry->d_parent;
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
+               d_instantiate(dentry, inode);
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
@@ -6303,7 +6402,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
+       /* Need this to keep space reservations serialized */
+       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+       mutex_unlock(&inode->i_mutex);
+       if (!ret)
+               ret = btrfs_update_time(vma->vm_file);
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
@@ -6515,8 +6619,9 @@ static int btrfs_truncate(struct inode *inode)
                        /* Just need the 1 for updating the inode */
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
-                               err = PTR_ERR(trans);
-                               goto out;
+                               ret = err = PTR_ERR(trans);
+                               trans = NULL;
+                               break;
                        }
                }
 
@@ -6794,11 +6899,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
 {
        struct inode *inode = dentry->d_inode;
+       u32 blocksize = inode->i_sb->s_blocksize;
+
        generic_fillattr(inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;
        stat->blksize = PAGE_CACHE_SIZE;
-       stat->blocks = (inode_get_bytes(inode) +
-                       BTRFS_I(inode)->delalloc_bytes) >> 9;
+       stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
+               ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
        return 0;
 }
 
@@ -7074,14 +7181,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
        if (drop_inode)
@@ -7130,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
 
 out_unlock:
+       if (!err)
+               d_instantiate(dentry, inode);
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
        if (drop_inode) {
@@ -7351,6 +7467,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
+       .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
index 4a34c472f1261bea0c3c228e9800d04a26a6d714..c04f02c7d5bbea215557a1d8ae2fc8ccce6e5862 100644 (file)
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
+       btrfs_update_iflags(inode);
+       inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 
-       btrfs_update_iflags(inode);
-       inode->i_ctime = CURRENT_TIME;
        btrfs_end_transaction(trans, root);
 
        mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
+       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
+       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
@@ -1216,12 +1218,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                *devstr = '\0';
                devstr = vol_args->name;
                devid = simple_strtoull(devstr, &end, 10);
-               printk(KERN_INFO "resizing devid %llu\n",
+               printk(KERN_INFO "btrfs: resizing devid %llu\n",
                       (unsigned long long)devid);
        }
        device = btrfs_find_device(root, devid, NULL, NULL);
        if (!device) {
-               printk(KERN_INFO "resizer unable to find device %llu\n",
+               printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
                       (unsigned long long)devid);
                ret = -EINVAL;
                goto out_unlock;
@@ -1267,7 +1269,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        do_div(new_size, root->sectorsize);
        new_size *= root->sectorsize;
 
-       printk(KERN_INFO "new size for %s is %llu\n",
+       printk(KERN_INFO "btrfs: new size for %s is %llu\n",
                device->name, (unsigned long long)new_size);
 
        if (new_size > old_size) {
@@ -1278,7 +1280,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans, root);
-       } else {
+       } else if (new_size < old_size) {
                ret = btrfs_shrink_device(device, new_size);
        }
 
@@ -2930,11 +2932,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
                goto out;
 
        for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
-               rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val;
+               rel_ptr = ipath->fspath->val[i] -
+                         (u64)(unsigned long)ipath->fspath->val;
                ipath->fspath->val[i] = rel_ptr;
        }
 
-       ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size);
+       ret = copy_to_user((void *)(unsigned long)ipa->fspath,
+                          (void *)(unsigned long)ipath->fspath, size);
        if (ret) {
                ret = -EFAULT;
                goto out;
@@ -3017,7 +3021,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
        if (ret < 0)
                goto out;
 
-       ret = copy_to_user((void *)loi->inodes, (void *)inodes, size);
+       ret = copy_to_user((void *)(unsigned long)loi->inodes,
+                          (void *)(unsigned long)inodes, size);
        if (ret)
                ret = -EFAULT;
 
index dff29d5e151a3b80d516be44cc1ebdf0d00adad6..cfb55434a46981fa64416e68fa3fd29cf58238f5 100644 (file)
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
+               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
index f4190f22edfb2663d345d9189ee2c618804f885b..ddf2c90d3fc0c475cbfabf6397c84f734abcc5e8 100644 (file)
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
        btrfs_release_path(swarn->path);
 
        ipath = init_ipath(4096, local_root, swarn->path);
+       if (IS_ERR(ipath)) {
+               ret = PTR_ERR(ipath);
+               ipath = NULL;
+               goto err;
+       }
        ret = paths_from_inode(inum, ipath);
 
        if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
                        swarn->logical, swarn->dev->name,
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
-                       (char *)ipath->fspath->val[i]);
+                       (char *)(unsigned long)ipath->fspath->val[i]);
 
        free_ipath(ipath);
        return 0;
@@ -1530,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret = 0;
 
        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0) {
                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                           fs_info->thread_pool_size, &fs_info->generic_worker);
                fs_info->scrub_workers.idle_thresh = 4;
-               btrfs_start_workers(&fs_info->scrub_workers, 1);
+               ret = btrfs_start_workers(&fs_info->scrub_workers);
+               if (ret)
+                       goto out;
        }
        ++fs_info->scrub_workers_refcnt;
+out:
        mutex_unlock(&fs_info->scrub_lock);
 
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
index 8bd9d6d0e07ae97cc2060cb0695468635b422a7e..200f63bc6675eca20cf1b55c9ced534efccaf63b 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/cleancache.h>
 #include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -825,13 +826,9 @@ static char *setup_root_args(char *args)
 static struct dentry *mount_subvol(const char *subvol_name, int flags,
                                   const char *device_name, char *data)
 {
-       struct super_block *s;
        struct dentry *root;
        struct vfsmount *mnt;
-       struct mnt_namespace *ns_private;
        char *newargs;
-       struct path path;
-       int error;
 
        newargs = setup_root_args(data);
        if (!newargs)
@@ -842,39 +839,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
 
-       ns_private = create_mnt_ns(mnt);
-       if (IS_ERR(ns_private)) {
-               mntput(mnt);
-               return ERR_CAST(ns_private);
-       }
+       root = mount_subtree(mnt, subvol_name);
 
-       /*
-        * This will trigger the automount of the subvol so we can just
-        * drop the mnt we have here and return the dentry that we
-        * found.
-        */
-       error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
-                               LOOKUP_FOLLOW, &path);
-       put_mnt_ns(ns_private);
-       if (error)
-               return ERR_PTR(error);
-
-       if (!is_subvolume_inode(path.dentry->d_inode)) {
-               path_put(&path);
-               mntput(mnt);
-               error = -EINVAL;
+       if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
+               struct super_block *s = root->d_sb;
+               dput(root);
+               root = ERR_PTR(-EINVAL);
+               deactivate_locked_super(s);
                printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
                                subvol_name);
-               return ERR_PTR(-EINVAL);
        }
 
-       /* Get a ref to the sb and the dentry we found and return it */
-       s = path.mnt->mnt_sb;
-       atomic_inc(&s->s_active);
-       root = dget(path.dentry);
-       path_put(&path);
-       down_write(&s->s_umount);
-
        return root;
 }
 
@@ -1079,11 +1054,11 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        u64 avail_space;
        u64 used_space;
        u64 min_stripe_size;
-       int min_stripes = 1;
+       int min_stripes = 1, num_stripes = 1;
        int i = 0, nr_devices;
        int ret;
 
-       nr_devices = fs_info->fs_devices->rw_devices;
+       nr_devices = fs_info->fs_devices->open_devices;
        BUG_ON(!nr_devices);
 
        devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1093,20 +1068,24 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
        /* calc min stripe number for data space alloction */
        type = btrfs_get_alloc_profile(root, 1);
-       if (type & BTRFS_BLOCK_GROUP_RAID0)
+       if (type & BTRFS_BLOCK_GROUP_RAID0) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID1)
+               num_stripes = nr_devices;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID10)
+               num_stripes = 2;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
                min_stripes = 4;
+               num_stripes = 4;
+       }
 
        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_stripe_size = 2 * BTRFS_STRIPE_LEN;
        else
                min_stripe_size = BTRFS_STRIPE_LEN;
 
-       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-               if (!device->in_fs_metadata)
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (!device->in_fs_metadata || !device->bdev)
                        continue;
 
                avail_space = device->total_bytes - device->bytes_used;
@@ -1167,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        i = nr_devices - 1;
        avail_space = 0;
        while (nr_devices >= min_stripes) {
+               if (num_stripes > nr_devices)
+                       num_stripes = nr_devices;
+
                if (devices_info[i].max_avail >= min_stripe_size) {
                        int j;
                        u64 alloc_size;
 
-                       avail_space += devices_info[i].max_avail * min_stripes;
+                       avail_space += devices_info[i].max_avail * num_stripes;
                        alloc_size = devices_info[i].max_avail;
-                       for (j = i + 1 - min_stripes; j <= i; j++)
+                       for (j = i + 1 - num_stripes; j <= i; j++)
                                devices_info[j].max_avail -= alloc_size;
                }
                i--;
@@ -1290,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
        return 0;
 }
 
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+       int ret;
+
+       ret = btrfs_dirty_inode(inode);
+       if (ret)
+               printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+                                  "error %d\n", btrfs_ino(inode), ret);
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
@@ -1297,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .write_inode    = btrfs_write_inode,
-       .dirty_inode    = btrfs_dirty_inode,
+       .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
index 6a0574e923bc6a33b4965976328222dcf0a61cb0..81376d94cd3c6a4639ebef35df501dbefbfb2435 100644 (file)
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
 
                        btrfs_save_ino_cache(root, trans);
 
+                       /* see comments in should_cow_block() */
+                       root->force_cow = 0;
+                       smp_wmb();
+
                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
 
+       /* see comments in should_cow_block() */
+       root->force_cow = 1;
+       smp_wmb();
+
        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
index c37433d3cd82464adbe13173433521a4ab1cca14..f4b839fd3c9dd5cd854cb7bada4e3831d8ea1713 100644 (file)
@@ -295,6 +295,12 @@ loop_lock:
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
+               /* unplug every 64 requests just for good measure */
+               if (batch_run % 64 == 0) {
+                       blk_finish_plug(&plug);
+                       blk_start_plug(&plug);
+                       sync_pending = 0;
+               }
        }
 
        cond_resched();
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
 
-       bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+       bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
                        err = -EIO;
-               } else if (err) {
+               } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
index ab5b1c49f3529e9e7e112649b98f22d0a923fb35..78f2d4d4f37fe81317395688a8b090b71e53a612 100644 (file)
@@ -100,6 +100,12 @@ struct btrfs_device {
        struct reada_zone *reada_curr_zone;
        struct radix_tree_root reada_zones;
        struct radix_tree_root reada_extents;
+
+       /* for sending down flush barriers */
+       struct bio *flush_bio;
+       struct completion flush_wait;
+       int nobarriers;
+
 };
 
 struct btrfs_fs_devices {
index 4144caf2f9d3a5ef95a7a9162b1de0cfa326cf4e..173b1d22e59b5a4bf8ed714f72b233cb70537468 100644 (file)
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
        snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
 
        /* dirty the head */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* now adjust page */
        spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return snapc;
 }
 
index 0f327c6c967954ec329f25bb44f45a1e4219cd71..8b53193e4f7ca67e3011c8501c8f1fee62357f9d 100644 (file)
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
 /*
  * Find ceph_cap for given mds, if any.
  *
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
  */
 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
        struct ceph_cap *cap;
 
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return cap;
 }
 
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 
 int ceph_get_cap_mds(struct inode *inode)
 {
+       struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 }
 
 /*
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
  *
  * If I_FLUSH is set, leave the inode at the front of the list.
  *
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
  *    -> we take mdsc->cap_delay_lock
  */
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 /*
  * Cancel delayed work on cap.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
                wanted |= ceph_caps_for_mode(fmode);
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
 
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
 }
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
        struct rb_node *p;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
                        break;
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
 }
 
 /*
- * called under i_lock
+ * called under i_ceph_lock
  */
 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 {
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 /*
  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
  *
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
 void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
 
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
 
 /*
  * Send a cap msg on the given inode.  Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
  *
  * Make note of max_size reported/requested from mds, revoked caps
  * that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
  * Return non-zero if delayed release, or we experienced an error
  * such that the caller should requeue + retry later.
  *
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
-       __releases(cap->ci->vfs_inode->i_lock)
+       __releases(cap->ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                xattr_version = ci->i_xattrs.version;
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * Unless @again is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
-               __releases(ci->vfs_inode->i_lock)
-               __acquires(ci->vfs_inode->i_lock)
+               __releases(ci->i_ceph_lock)
+               __acquires(ci->i_ceph_lock)
 {
        struct inode *inode = &ci->vfs_inode;
        int mds;
@@ -1261,7 +1262,7 @@ retry:
                        session = NULL;
                }
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }
 
@@ -1285,7 +1286,7 @@ retry:
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                goto retry;
        }
 
@@ -1322,11 +1323,9 @@ out:
 
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
  *
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        if (mdsc->stopping)
                is_delayed = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
@@ -1664,10 +1663,10 @@ ack:
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
                                      retain, flushing, NULL);
-               goto retry; /* retake i_lock and restart our cap scan. */
+               goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
        /*
@@ -1681,7 +1680,7 @@ ack:
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
        int flushing = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
@@ -1716,7 +1715,7 @@ retry:
                int delayed;
 
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
@@ -1727,18 +1726,18 @@ retry:
 
                flushing = __mark_caps_flushing(inode, session);
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     cap->issued | cap->implemented, flushing,
                                     flush_tid);
                if (!delayed)
                        goto out_unlocked;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __cap_delay_requeue(mdsc, ci);
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 out_unlocked:
        if (session && unlock_session)
                mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i, ret = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((ci->i_flushing_caps & (1 << i)) &&
                    ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
                        ret = 0;
                        break;
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return err;
 }
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                }
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                struct ceph_cap *cap;
                int delayed = 0;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                             cap->issued | cap->implemented,
                                             ci->i_flushing_caps, NULL);
                        if (delayed) {
-                               spin_lock(&inode->i_lock);
+                               spin_lock(&ci->i_ceph_lock);
                                __cap_delay_requeue(mdsc, ci);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                        }
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        }
 }
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
        struct ceph_cap *cap;
        int delayed = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
                                     cap->issued | cap->implemented,
                                     ci->i_flushing_caps, NULL);
                if (delayed) {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
  * Take references to capabilities we hold, so that we don't release
  * them to the MDS prematurely.
  *
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
  */
 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
 {
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
        file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("get_cap_refs %p ret %d got %s\n", inode,
             ret, ceph_cap_string(*got));
        return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
        int check = 0;
 
        /* do we need to explicitly request a larger max_size? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((endoff >= ci->i_max_size ||
             endoff > (inode->i_size << 1)) &&
            endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
                ci->i_wanted_max_size = endoff;
                check = 1;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (check)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
@@ -2140,9 +2139,9 @@ retry:
  */
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __take_cap_refs(ci, caps);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        struct ceph_cap_snap *capsnap;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (had & CEPH_CAP_PIN)
                --ci->i_pin_ref;
        if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                                }
                        }
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
             last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        int found = 0;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref -= nr;
        last = !ci->i_wrbuffer_ref;
 
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                }
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last) {
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
  *
  * return value:
  *  0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap,
                             struct ceph_buffer *xattr_buf)
-               __releases(inode->i_lock)
+               __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        }
        BUG_ON(cap->issued & ~cap->implemented);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (writeback)
                /*
                 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                 struct ceph_mds_caps *m,
                                 struct ceph_mds_session *session,
                                 struct ceph_cap *cap)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        wake_up_all(&ci->i_cap_wq);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
             inode, ci, session->s_mds, follows);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                if (capsnap->follows == follows) {
                        if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                             capsnap, capsnap->follows);
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
 static void handle_cap_trunc(struct inode *inode,
                             struct ceph_mds_caps *trunc,
                             struct ceph_mds_session *session)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
             inode, mds, seq, truncate_size, truncate_seq);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          truncate_seq, truncate_size, size);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
             inode, ci, mds, mseq);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure we haven't seen a higher mseq */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        }
        /* else, we already released it */
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
        up_read(&mdsc->snap_rwsem);
 
        /* make sure we re-request max_size, if necessary */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_requested_max_size = 0;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
+       struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
+       ci = ceph_inode(inode);
        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
             vino.snap, inode);
        if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        }
 
        /* the rest require a cap */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ceph_inode(inode), mds);
        if (!cap) {
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto flush_cap_releases;
        }
 
-       /* note that each of these drops i_lock for us */
+       /* note that each of these drops i_ceph_lock for us */
        switch (op) {
        case CEPH_CAP_OP_REVOKE:
        case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                break;
 
        default:
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
                       ceph_cap_op_name(op));
        }
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
        struct inode *inode = &ci->vfs_inode;
        int last = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
        if (--ci->i_nr_by_mode[fmode] == 0)
                last++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last && ci->i_vino.snap == CEPH_NOSNAP)
                ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
        int used, dirty;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                             inode, cap, ceph_cap_string(cap->issued));
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 
        /*
         * force an record for the directory caps if we have a dentry lease.
-        * this is racy (can't take i_lock and d_lock together), but it
+        * this is racy (can't take i_ceph_lock and d_lock together), but it
         * doesn't have to be perfect; the mds will revoke anything we don't
         * release.
         */
index 2abd0dfad7f8093c252e572d90fdd42991b410be..98954003a8d313007386d4cfc214c5dba9296647 100644 (file)
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        }
 
        /* can we use the dcache? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            ceph_dir_test_complete(inode) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                ceph_dir_set_complete(inode);
                ci->i_max_offset = filp->f_pos;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-               spin_lock(&dir->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                    !is_root_ceph_dentry(dir, dentry) &&
                    ceph_dir_test_complete(dir) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-                       spin_unlock(&dir->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
-               spin_unlock(&dir->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 
        op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return drop;
 }
 
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;
 
-       spin_lock(&dir->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-       spin_unlock(&dir->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
@@ -1094,42 +1094,19 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry,
 /*
  * Set/clear/test dir complete flag on the dir's dentry.
  */
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
-       struct dentry *alias;
-
-       if (list_empty(&inode->i_dentry))
-               return NULL;
-       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
-       return alias;
-}
-
 void ceph_dir_set_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-       
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) complete\n", inode, dentry);
-               set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 void ceph_dir_clear_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) NOT complete\n", inode, dentry);
-               clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 bool ceph_dir_test_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry))
-               return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+       /* not yet implemented */
        return false;
 }
 
@@ -1143,7 +1120,7 @@ static void ceph_d_prune(struct dentry *dentry)
 {
        struct ceph_dentry_info *di;
 
-       dout("d_release %p\n", dentry);
+       dout("ceph_d_prune %p\n", dentry);
 
        /* do we have a valid parent? */
        if (!dentry->d_parent || IS_ROOT(dentry))
index ce549d31eeb7934634f4905e00861999cbe1b33f..ed72428d9c75c80a6744ccd6a996b83c1a20d333 100644 (file)
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
 
        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
 
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
                 */
                int dirty;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);
 
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
 
        if (ret >= 0) {
                int dirty;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
 
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+       if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
index e392bfce84a3cc6873f1439ba8dd5f28fd0cd935..87fb132fb33012a9ca7839e9cca832eb472a1087 100644 (file)
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       spin_lock_init(&ci->i_ceph_lock);
+
        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
                               iinfo->xattr_len);
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /*
         * provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
                        char *sym;
 
                        BUG_ON(symlen != inode->i_size);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
        }
 
 no_change:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
                                     info->cap.flags,
                                     caps_reservation);
                } else {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 {
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dir->d_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_dentry_info *di;
 
        BUG_ON(!inode);
 
        di = ceph_dentry(dn);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (!ceph_dir_test_complete(inode)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1328,12 +1331,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
  */
 void ceph_queue_writeback(struct inode *inode)
 {
+       ihold(inode);
        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                       &ceph_inode(inode)->i_wb_work)) {
                dout("ceph_queue_writeback %p\n", inode);
-               ihold(inode);
        } else {
                dout("ceph_queue_writeback %p failed\n", inode);
+               iput(inode);
        }
 }
 
@@ -1353,12 +1357,13 @@ static void ceph_writeback_work(struct work_struct *work)
  */
 void ceph_queue_invalidate(struct inode *inode)
 {
+       ihold(inode);
        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                       &ceph_inode(inode)->i_pg_inv_work)) {
                dout("ceph_queue_invalidate %p\n", inode);
-               ihold(inode);
        } else {
                dout("ceph_queue_invalidate %p failed\n", inode);
+               iput(inode);
        }
 }
 
@@ -1374,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
        u32 orig_gen;
        int check = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                /* nevermind! */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(&inode->i_data, 0);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
                dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1399,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
                     inode, orig_gen, ci->i_rdcache_gen,
                     ci->i_rdcache_revoking);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (check)
                ceph_check_caps(ci, 0, NULL);
@@ -1434,13 +1439,14 @@ void ceph_queue_vmtruncate(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       ihold(inode);
        if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
                       &ci->i_vmtruncate_work)) {
                dout("ceph_queue_vmtruncate %p\n", inode);
-               ihold(inode);
        } else {
                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                     inode, ci->i_truncate_pending);
+               iput(inode);
        }
 }
 
@@ -1457,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
        int wrbuffer_refs, wake = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
 
@@ -1471,7 +1477,7 @@ retry:
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
@@ -1481,15 +1487,15 @@ retry:
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(inode->i_mapping, to);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1544,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1692,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        release &= issued;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1714,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        __ceph_do_pending_vmtruncate(inode);
        return err;
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        ceph_mdsc_put_request(req);
        return err;
 }
index 5a14c29cbba6f82b00ca42e8ac0327dbc12fd8bb..790914a598dd5d68b8f40b851c2faff2e790e4af 100644 (file)
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_nr_by_mode[fi->fmode]--;
                fi->fmode |= CEPH_FILE_MODE_LAZY;
                ci->i_nr_by_mode[fi->fmode]++;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout("ioctl_layzio: file %p marked lazy\n", file);
 
                ceph_check_caps(ci, 0, NULL);
index 264ab701154fead54aec35e0f45152e4709f201a..6203d805eb45061d20b5d8e08222f97aae6cd0a6 100644 (file)
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                }
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 
 random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 
        wake_up_all(&ci->i_cap_wq);
        if (arg) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
 }
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if (session->s_trim_caps <= 0)
                return -1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
                __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return 0;
 }
 
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                     session->s_mds);
                                ret = 0;
                        }
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ceph_dir_clear_complete(inode);
        ci->i_release_count++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        if (err)
                goto out_free;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
 
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                rec.v1.pathbase = cpu_to_le64(pathbase);
                reclen = sizeof(rec.v1);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
index 4bb239921dbdf98a945b963d2d05ede9ff434736..a50ca0e39475794018c2350570547bff4f6a7df8 100644 (file)
@@ -20,7 +20,7 @@
  *
  *         mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *                 mdsc->snap_flush_lock
  *                 mdsc->cap_delay_lock
  *
index e2643719133323a07a8e69f0f631c306e80f7532..a559c80f127a04353a488181029744a009165f09 100644 (file)
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                return;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                kfree(capsnap);
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
  *
  * If capsnap can now be flushed, add to snap_flush list, and return 1.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                inode = &ci->vfs_inode;
                ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_flush_snaps(ci, &session, 0);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
        }
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                                continue;
                        ci = ceph_inode(inode);
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_snap_realm)
                                goto skip_inode;
                        /*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        oldrealm = ci->i_snap_realm;
                        ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        ceph_get_snap_realm(mdsc, realm);
                        ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        continue;
 
 skip_inode:
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        iput(inode);
                }
 
index a90846fac759bd744620a8d87789e9a56db02c48..b48f15f101a0ed61127bc920b82afa3d61fcdc0b 100644 (file)
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
        if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-               seq_printf(m, ",rasize=%d", fsopt->rsize);
+               seq_printf(m, ",rasize=%d", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
        if (err == 0) {
                dout("open_root_inode success\n");
                if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
-                   fsc->sb->s_root == NULL)
+                   fsc->sb->s_root == NULL) {
                        root = d_alloc_root(req->r_target_inode);
-               else
+                       ceph_init_dentry(root);
+               } else {
                        root = d_obtain_alias(req->r_target_inode);
+               }
                req->r_target_inode = NULL;
                dout("open_root_inode success, root dentry is %p\n", root);
        } else {
index 01bf189e08a91387b6f4686b9999fe03ccf8aa32..edcbf3774a56460d377b31a9d51ea543a114a839 100644 (file)
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
 struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
+       spinlock_t i_ceph_lock;
+
        u64 i_version;
        u32 i_time_warp_seq;
 
@@ -271,7 +273,7 @@ struct ceph_inode_info {
 
        struct ceph_inode_xattrs_info i_xattrs;
 
-       /* capabilities.  protected _both_ by i_lock and cap->session's
+       /* capabilities.  protected _both_ by i_ceph_lock and cap->session's
         * s_mutex. */
        struct rb_root i_caps;           /* cap list */
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags &= ~mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags |= mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool r;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = (ci->i_ceph_flags & mask) == mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
        int issued;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return issued;
 }
 
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
                                        int touch)
 {
        int r;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = __ceph_caps_issued_mask(ci, mask, touch);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-       struct inode *inode = &cap->ci->vfs_inode;
-       spin_lock(&inode->i_lock);
+       spin_lock(&cap->ci->i_ceph_lock);
        __ceph_remove_cap(cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
index 96c6739a02804f081adde4e2972523e1bf34911c..a5e36e4488a7d6e9f6dc0b9a5b9d440b4229e9c1 100644 (file)
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 }
 
 static int __build_xattrs(struct inode *inode)
-       __releases(inode->i_lock)
-       __acquires(inode->i_lock)
+       __releases(ci->i_ceph_lock)
+       __acquires(ci->i_ceph_lock)
 {
        u32 namelen;
        u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
                                 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
                                goto bad_lock;
                }
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
 
        return err;
 bad_lock:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
        if (vxattrs)
                vxattr = ceph_match_vxattr(vxattrs, name);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
        memcpy(value, xattr->val, xattr->val_len);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
        u32 len;
        int i;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto list_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        err = __build_xattrs(inode);
        if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
                }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        if (!xattr)
                goto out;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;
 
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto out;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
        kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
                        return -EOPNOTSUPP;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
 }
index d6a972df033800eb0c00f6116b3c4b6bb309bd87..f3670cf72587beee30ca9f9bf8152e169d6bd085 100644 (file)
@@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
        byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
        byte_count += total_in_buf2;
        /* don't allow buffer to overflow */
-       if (byte_count > CIFSMaxBufSize)
+       if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
                return -ENOBUFS;
        pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
 
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
        smb_msg.msg_controllen = 0;
 
        for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
                if (server_unresponsive(server)) {
                        total_read = -EAGAIN;
                        break;
@@ -2120,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                warned_on_ntlm = true;
                cERROR(1, "default security mechanism requested.  The default "
                        "security mechanism will be upgraded from ntlm to "
-                       "ntlmv2 in kernel release 3.2");
+                       "ntlmv2 in kernel release 3.3");
        }
        ses->overrideSecFlg = volume_info->secFlg;
 
index cf0b1539b321acf1cdd69e4db0f590d8f83e9293..4dd9283885e745bafdd7dce81e5456f98c5635c9 100644 (file)
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                                         lock->type, lock->netfid, conf_lock);
 }
 
+/*
+ * Check if there is another lock that prevents us to set the lock (mandatory
+ * style). If such a lock exists, update the flock structure with its
+ * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
+ * or leave it the same if we can't. Returns 0 if we don't need to request to
+ * the server or 1 otherwise.
+ */
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
        mutex_unlock(&cinode->lock_mutex);
 }
 
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to request to the server;
+ * 2) 1, if no locks prevent us but we need to request to the server;
+ * 3) -EACCESS, if there is a lock that prevents us and wait is false.
+ */
 static int
 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
@@ -778,6 +791,13 @@ try_again:
        return rc;
 }
 
+/*
+ * Check if there is another lock that prevents us to set the lock (posix
+ * style). If such a lock exists, update the flock structure with its
+ * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
+ * or leave it the same if we can't. Returns 0 if we don't need to request to
+ * the server or 1 otherwise.
+ */
 static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
        return rc;
 }
 
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to request to the server;
+ * 2) 1, if we need to request to the server;
+ * 3) <0, if the error occurs while setting the lock.
+ */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
index 5de03ec20144449c2dea66441b1e66cc577f4a48..a090bbe6ee29e196018867c9f5e4da3efe9d82b9 100644 (file)
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                 rc);
                        return rc;
                }
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
        }
 
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                cFYI(1, "calling findnext2");
                rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
                                  &cifsFile->srch_inf);
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
                if (rc)
                        return -ENOENT;
        }
index 7cacba12b8f114468ef56dab7d58fc5678d0c878..80d850881938d0c0950addc4d97ae4855dadfa4a 100644 (file)
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 {
        int rc;
        int len;
-       __u16 wpwd[129];
+       __le16 wpwd[129];
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
                *wpwd = 0; /* Ensure string is null terminated */
        }
 
-       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
-       memset(wpwd, 0, 129 * sizeof(__u16));
+       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+       memset(wpwd, 0, 129 * sizeof(__le16));
 
        return rc;
 }
index ca418aaf635254dd85722e791510f330c1a5917d..9d8715c45f2574ae473634feee7b0a07ca607355 100644 (file)
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
        return bdi_init(&configfs_backing_dev_info);
 }
 
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
 {
        bdi_destroy(&configfs_backing_dev_info);
 }
index ecc62178beda98d3975b2796714ab6d3d6b1968e..276e15cafd58e8b5171b8da6a8e503c99631cabd 100644 (file)
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
                goto out;
 
        config_kobj = kobject_create_and_add("config", kernel_kobj);
-       if (!config_kobj) {
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (!config_kobj)
+               goto out2;
+
+       err = configfs_inode_init();
+       if (err)
+               goto out3;
 
        err = register_filesystem(&configfs_fs_type);
-       if (err) {
-               printk(KERN_ERR "configfs: Unable to register filesystem!\n");
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (err)
+               goto out4;
 
-       err = configfs_inode_init();
-       if (err) {
-               unregister_filesystem(&configfs_fs_type);
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-       }
+       return 0;
+out4:
+       printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+       configfs_inode_exit();
+out3:
+       kobject_put(config_kobj);
+out2:
+       kmem_cache_destroy(configfs_dir_cachep);
+       configfs_dir_cachep = NULL;
 out:
        return err;
 }
index a901c6901bce1cf0a8b1823e8c87d83424594474..89509b5a090e27320e45b9c0c2f5480e082b1a37 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
+#include <linux/ratelimit.h>
 #include "internal.h"
 
 /*
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                                actual = __d_unalias(inode, dentry, alias);
                        }
                        write_sequnlock(&rename_lock);
-                       if (IS_ERR(actual))
+                       if (IS_ERR(actual)) {
+                               if (PTR_ERR(actual) == -ELOOP)
+                                       pr_warn_ratelimited(
+                                               "VFS: Lookup of '%s' in %s %s"
+                                               " would have caused loop\n",
+                                               dentry->d_name.name,
+                                               inode->i_sb->s_type->name,
+                                               inode->i_sb->s_id);
                                dput(alias);
+                       }
                        goto out_nolock;
                }
        }
@@ -2430,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2474,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2491,15 +2498,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2510,10 +2519,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2524,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2532,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2570,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2585,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2608,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2616,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2749,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
index 990626e7da80aaf411c67f8e20ca73ac3a46bc0a..0b3109ee42571e258fd4a2ff3ef342e9ddb1658c 100644 (file)
@@ -281,7 +281,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
        } else {
                struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
-               ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
+               ret6->sin6_addr = in6->sin6_addr;
        }
 
        return 0;
index 58609bde3b9fc076187afa3317582788f2f6bc7f..2a834255c75de911b7e1f8eb10026972913e14b4 100644 (file)
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
 
 /**
  * ecryptfs_new_file_context
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_inode: The eCryptfs inode
  *
  * If the crypto context for the file has not yet been established,
  * this is where we do that.  Establishing a new crypto context
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
  *
  * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry)
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
 {
        struct ecryptfs_crypt_stat *crypt_stat =
-           &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+           &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
            &ecryptfs_superblock_to_private(
-                   ecryptfs_dentry->d_sb)->mount_crypt_stat;
+                   ecryptfs_inode->i_sb)->mount_crypt_stat;
        int cipher_name_len;
        int rc = 0;
 
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
 }
 
 static int
-ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
+ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
                                    char *virt, size_t virt_len)
 {
        int rc;
 
-       rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
+       rc = ecryptfs_write_lower(ecryptfs_inode, virt,
                                  0, virt_len);
        if (rc < 0)
                printk(KERN_ERR "%s: Error attempting to write header "
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
 
 /**
  * ecryptfs_write_metadata
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
+ * @ecryptfs_inode: The newly created eCryptfs inode
  *
  * Write the file headers out.  This will likely involve a userspace
  * callout, in which the session key is encrypted with one or more
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
  *
  * Returns zero on success; non-zero on error
  */
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+                           struct inode *ecryptfs_inode)
 {
        struct ecryptfs_crypt_stat *crypt_stat =
-               &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+               &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
        unsigned int order;
        char *virt;
        size_t virt_len;
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
                rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
                                                      size);
        else
-               rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt,
+               rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
                                                         virt_len);
        if (rc) {
                printk(KERN_ERR "%s: Error writing metadata out to lower file; "
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
 
 /* We could either offset on every reverse map or just pad some 0x00's
  * at the front here */
-static const unsigned char filename_rev_map[] = {
+static const unsigned char filename_rev_map[256] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
        0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
        0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
        0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
-       0x3D, 0x3E, 0x3F
+       0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
 };
 
 /**
index 54481a3b2c7960e6ba205696c9d69ec44b331194..a9f29b12fbf290ba4987f778e582357d38ae1258 100644 (file)
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
 int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
 int ecryptfs_encrypt_page(struct page *page);
 int ecryptfs_decrypt_page(struct page *page);
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+                           struct inode *ecryptfs_inode);
 int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
 void ecryptfs_write_crypt_stat_flags(char *page_virt,
                                     struct ecryptfs_crypt_stat *crypt_stat,
                                     size_t *written);
index c6ac98cf9baaeca5d898cce216aa0dc08df2716c..d3f95f941c47e68a1d0a2cbe60371f148d64e124 100644 (file)
@@ -139,6 +139,27 @@ out:
        return rc;
 }
 
+static void ecryptfs_vma_close(struct vm_area_struct *vma)
+{
+       filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+       .close          = ecryptfs_vma_close,
+       .fault          = filemap_fault,
+};
+
+static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int rc;
+
+       rc = generic_file_mmap(file, vma);
+       if (!rc)
+               vma->vm_ops = &ecryptfs_file_vm_ops;
+
+       return rc;
+}
+
 struct kmem_cache *ecryptfs_file_info_cache;
 
 /**
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .mmap = generic_file_mmap,
+       .mmap = ecryptfs_file_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
        .release = ecryptfs_release,
index a36d327f15215e471628e2fda253e84ccd6100e9..32f90a3ae63eb85bc6d162aa16f28d48b3cdac7f 100644 (file)
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
  * it. It will also update the eCryptfs directory inode to mimic the
  * stat of the lower directory inode.
  *
- * Returns zero on success; non-zero on error condition
+ * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
  */
-static int
+static struct inode *
 ecryptfs_do_create(struct inode *directory_inode,
                   struct dentry *ecryptfs_dentry, int mode)
 {
        int rc;
        struct dentry *lower_dentry;
        struct dentry *lower_dir_dentry;
+       struct inode *inode;
 
        lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
        lower_dir_dentry = lock_parent(lower_dentry);
        if (IS_ERR(lower_dir_dentry)) {
                ecryptfs_printk(KERN_ERR, "Error locking directory of "
                                "dentry\n");
-               rc = PTR_ERR(lower_dir_dentry);
+               inode = ERR_CAST(lower_dir_dentry);
                goto out;
        }
        rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
        if (rc) {
                printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
                       "rc = [%d]\n", __func__, rc);
+               inode = ERR_PTR(rc);
                goto out_lock;
        }
-       rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
-                               directory_inode->i_sb);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
+       inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+                                    directory_inode->i_sb);
+       if (IS_ERR(inode))
                goto out_lock;
-       }
        fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
        fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
 out_lock:
        unlock_dir(lower_dir_dentry);
 out:
-       return rc;
+       return inode;
 }
 
 /**
@@ -219,26 +219,26 @@ out:
  *
  * Returns zero on success
  */
-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+                                   struct inode *ecryptfs_inode)
 {
        struct ecryptfs_crypt_stat *crypt_stat =
-               &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+               &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
        int rc = 0;
 
-       if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+       if (S_ISDIR(ecryptfs_inode->i_mode)) {
                ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
                goto out;
        }
        ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
-       rc = ecryptfs_new_file_context(ecryptfs_dentry);
+       rc = ecryptfs_new_file_context(ecryptfs_inode);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error creating new file "
                                "context; rc = [%d]\n", rc);
                goto out;
        }
-       rc = ecryptfs_get_lower_file(ecryptfs_dentry,
-                                    ecryptfs_dentry->d_inode);
+       rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to initialize "
                        "the lower file for the dentry with name "
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
                        ecryptfs_dentry->d_name.name, rc);
                goto out;
        }
-       rc = ecryptfs_write_metadata(ecryptfs_dentry);
+       rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
        if (rc)
                printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
-       ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
+       ecryptfs_put_lower_file(ecryptfs_inode);
 out:
        return rc;
 }
@@ -269,18 +269,28 @@ static int
 ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
                int mode, struct nameidata *nd)
 {
+       struct inode *ecryptfs_inode;
        int rc;
 
-       /* ecryptfs_do_create() calls ecryptfs_interpose() */
-       rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode);
-       if (unlikely(rc)) {
+       ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
+                                           mode);
+       if (unlikely(IS_ERR(ecryptfs_inode))) {
                ecryptfs_printk(KERN_WARNING, "Failed to create file in"
                                "lower filesystem\n");
+               rc = PTR_ERR(ecryptfs_inode);
                goto out;
        }
        /* At this point, a file exists on "disk"; we need to make sure
         * that this on disk file is prepared to be an ecryptfs file */
-       rc = ecryptfs_initialize_file(ecryptfs_dentry);
+       rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+       if (rc) {
+               drop_nlink(ecryptfs_inode);
+               unlock_new_inode(ecryptfs_inode);
+               iput(ecryptfs_inode);
+               goto out;
+       }
+       d_instantiate(ecryptfs_dentry, ecryptfs_inode);
+       unlock_new_inode(ecryptfs_inode);
 out:
        return rc;
 }
index f6dba4505f1cc7e4f251f28a18bbeea5b3dcb2a7..12ccacda44e0288e13247e3e79ebd414287eb548 100644 (file)
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
-              EXT4_B2C(sbi, ext4_free_blocks_count(es)),
+              EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
 #else
index 61fa9e1614afd1922bae4cf5ce0d26dfdbab6b25..607b1557d292d1b24d5a605d28565de17f36bf13 100644 (file)
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
-       neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+       neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
 out:
        brelse(bh);
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        /* Pre-conditions */
        BUG_ON(!ext4_ext_is_uninitialized(ex));
        BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
-       BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
 
        /*
         * Attempt to transfer newly initialized blocks from the currently
index 240f6e2dc7eeb593e4143e05fc15e4565db155b1..92655fd8965737bac9aa002f173ed607ecdcfa8a 100644 (file)
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                        clear_buffer_unwritten(bh);
                                }
 
-                               /* skip page if block allocation undone */
-                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                               /*
+                                * skip page if block allocation undone and
+                                * block is dirty
+                                */
+                               if (ext4_bh_delay_or_unwritten(NULL, bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
@@ -2270,6 +2273,7 @@ retry:
                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
+                       blk_finish_plug(&plug);
                        goto out_writepages;
                }
 
@@ -2386,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index;
        struct inode *inode = mapping->host;
        handle_t *handle;
-       loff_t page_len;
 
        index = pos >> PAGE_CACHE_SHIFT;
 
@@ -2433,13 +2436,6 @@ retry:
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
-       } else {
-               page_len = pos & (PAGE_CACHE_SIZE - 1);
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                               inode, page, pos - page_len, page_len,
-                               EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2482,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
-       loff_t page_len;
 
        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
                if (ext4_should_order_data(inode)) {
@@ -2507,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
         */
 
        new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
+       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2531,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
        }
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-
-       page_len = PAGE_CACHE_SIZE -
-                       ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
-       if (page_len > 0) {
-               ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                       inode, page, pos + copied - 1, page_len,
-                       EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-       }
-
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
@@ -2780,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
 
+       iocb->private = NULL;
+
        /* if not aio dio with unwritten extents, just free io and return */
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
-               iocb->private = NULL;
 out:
                if (is_async)
                        aio_complete(iocb, ret, 0);
@@ -2807,7 +2793,6 @@ out:
 
        /* queue the work to convert unwritten extents to written */
        queue_work(wq, &io_end->work);
-       iocb->private = NULL;
 
        /* XXX: probably should move into the real I/O completion handler */
        inode_dio_done(inode);
@@ -3202,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
 
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-       if (!page_has_buffers(page)) {
-               /*
-                * If the range to be discarded covers a partial block
-                * we need to get the page buffers.  This is because
-                * partial blocks cannot be released and the page needs
-                * to be updated with the contents of the block before
-                * we write the zeros on top of it.
-                */
-               if ((from & (blocksize - 1)) ||
-                   ((from + length) & (blocksize - 1))) {
-                       create_empty_buffers(page, blocksize, 0);
-               } else {
-                       /*
-                        * If there are no partial blocks,
-                        * there is nothing to update,
-                        * so we can return now
-                        */
-                       return 0;
-               }
-       }
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
 
        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
index 7ce1d0b19c94576892d0d80f016f7db44110ccb7..7e106c810c62bb18435d9a0fcb6d5066a3813fad 100644 (file)
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
                block_end = block_start + blocksize;
                if (block_start >= len) {
+                       /*
+                        * Comments copied from block_write_full_page_endio:
+                        *
+                        * The page straddles i_size.  It must be zeroed out on
+                        * each and every writepage invocation because it may
+                        * be mmapped.  "A file is mapped in multiples of the
+                        * page size.  For a file that is not a multiple of
+                        * the  page size, the remaining memory is zeroed when
+                        * mapped, and writes to that region are not written
+                        * out to the file."
+                        */
+                       zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
index 9953d80145ad0f6331086053a7d80301c418f3a7..3e1329e2f826132d8aec264116deb2c572be9ef7 100644 (file)
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",block_validity");
 
        if (!test_opt(sb, INIT_INODE_TABLE))
-               seq_puts(seq, ",noinit_inode_table");
+               seq_puts(seq, ",noinit_itable");
        else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-               seq_printf(seq, ",init_inode_table=%u",
+               seq_printf(seq, ",init_itable=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
        ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
-       Opt_discard, Opt_nodiscard,
-       Opt_init_inode_table, Opt_noinit_inode_table,
+       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
-       {Opt_init_inode_table, "init_itable=%u"},
-       {Opt_init_inode_table, "init_itable"},
-       {Opt_noinit_inode_table, "noinit_itable"},
+       {Opt_init_itable, "init_itable=%u"},
+       {Opt_init_itable, "init_itable"},
+       {Opt_noinit_itable, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1683,7 +1682,9 @@ static int parse_options(char *options, struct super_block *sb,
                        data_opt = EXT4_MOUNT_WRITEBACK_DATA;
                datacheck:
                        if (is_remount) {
-                               if (test_opt(sb, DATA_FLAGS) != data_opt) {
+                               if (!sbi->s_journal)
+                                       ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
+                               else if (test_opt(sb, DATA_FLAGS) != data_opt) {
                                        ext4_msg(sb, KERN_ERR,
                                                "Cannot change data mode on remount");
                                        return 0;
@@ -1890,7 +1891,7 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sb, DIOREAD_NOLOCK);
                        break;
-               case Opt_init_inode_table:
+               case Opt_init_itable:
                        set_opt(sb, INIT_INODE_TABLE);
                        if (args[0].from) {
                                if (match_int(&args[0], &option))
@@ -1901,7 +1902,7 @@ set_qf_format:
                                return 0;
                        sbi->s_li_wait_mult = option;
                        break;
-               case Opt_noinit_inode_table:
+               case Opt_noinit_itable:
                        clear_opt(sb, INIT_INODE_TABLE);
                        break;
                default:
@@ -3099,8 +3100,6 @@ static void ext4_destroy_lazyinit_thread(void)
 }
 
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
-                               __releases(kernel_lock)
-                               __acquires(kernel_lock)
 {
        char *orig_data = kstrdup(data, GFP_KERNEL);
        struct buffer_head *bh;
index 73c3992b2bb4aa765d3dc2418e907ca0a7ba78f2..517f211a3bd45c60f607e5da7d957bc739d55d47 100644 (file)
@@ -47,17 +47,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-       [WB_REASON_BACKGROUND]          = "background",
-       [WB_REASON_TRY_TO_FREE_PAGES]   = "try_to_free_pages",
-       [WB_REASON_SYNC]                = "sync",
-       [WB_REASON_PERIODIC]            = "periodic",
-       [WB_REASON_LAPTOP_TIMER]        = "laptop_timer",
-       [WB_REASON_FREE_MORE_MEM]       = "free_more_memory",
-       [WB_REASON_FS_FREE_SPACE]       = "fs_free_space",
-       [WB_REASON_FORKER_THREAD]       = "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -156,6 +145,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
  * @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1223,6 +1213,7 @@ static void wait_sb_inodes(struct super_block *sb)
  * writeback_inodes_sb_nr -    writeback dirty inodes from given super_block
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1242,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 /**
  * writeback_inodes_sb -       writeback dirty inodes from given super_block
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1257,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 /**
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1278,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
index 5cb8614508c339fb5e0c18031f3ebf249a4ec6c0..2aaf3eaaf13da03e328ac3523fe16ca09a60c690 100644 (file)
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;
 
-       while (num) {
+       while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
                struct page *page;
                unsigned int this_num;
 
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                num -= this_num;
                total_len += this_num;
+               index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
index 594f07a81c2899ba33a173be33cfc818afa0d39b..0c84100acd4492966e2fa48de6fbfacca0f10172 100644 (file)
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        mutex_lock(&inode->i_mutex);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+       if (origin != SEEK_CUR && origin != SEEK_SET) {
                retval = fuse_update_attributes(inode, NULL, file, NULL);
                if (retval)
                        goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
+               if (offset == 0) {
+                       retval = file->f_pos;
+                       goto exit;
+               }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
index 3e6d727564792edd3b59dd6c509db3173e7a082f..aa83109b94316c9ed86bd0b4b5c30df5d9d9675f 100644 (file)
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
 {
        int err;
 
-       err = register_filesystem(&fuse_fs_type);
-       if (err)
-               goto out;
-
-       err = register_fuseblk();
-       if (err)
-               goto out_unreg;
-
        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
-               goto out_unreg2;
+               goto out;
+
+       err = register_fuseblk();
+       if (err)
+               goto out2;
+
+       err = register_filesystem(&fuse_fs_type);
+       if (err)
+               goto out3;
 
        return 0;
 
- out_unreg2:
+ out3:
        unregister_fuseblk();
- out_unreg:
-       unregister_filesystem(&fuse_fs_type);
+ out2:
+       kmem_cache_destroy(fuse_inode_cachep);
  out:
        return err;
 }
index 3b0d05dcd7c1cc8f098205e781536cc2d1605acf..637694bf3a03c5652d780700686a9e719fca2709 100644 (file)
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+       if (IS_ERR(new_fl))
+               return PTR_ERR(new_fl);
 
        lock_flocks();
 
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
                if (fl->fl_owner == current->files)
                        i_have_this_lease = 1;
 
-       if (IS_ERR(new_fl) && !i_have_this_lease
-                       && ((mode & O_NONBLOCK) == 0)) {
-               error = PTR_ERR(new_fl);
-               goto out;
-       }
-
        break_time = 0;
        if (lease_break_time > 0) {
                break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ restart:
 
 out:
        unlock_flocks();
-       if (!IS_ERR(new_fl))
-               locks_free_lock(new_fl);
+       locks_free_lock(new_fl);
        return error;
 }
 
index 3f32bcb0d9bd5beb882b67480ff3593d202f5837..ef175cb8cfd8e20be4be8496660521a371297a50 100644 (file)
 #include <linux/bitops.h>
 #include <linux/sched.h>
 
-static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
-
 static DEFINE_SPINLOCK(bitmap_lock);
 
-static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
+/*
+ * bitmap consists of blocks filled with 16bit words
+ * bit set == busy, bit clear == free
+ * endianness is a mess, but for counting zero bits it really doesn't matter...
+ */
+static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
 {
-       unsigned i, j, sum = 0;
-       struct buffer_head *bh;
-  
-       for (i=0; i<numblocks-1; i++) {
-               if (!(bh=map[i])) 
-                       return(0);
-               for (j=0; j<bh->b_size; j++)
-                       sum += nibblemap[bh->b_data[j] & 0xf]
-                               + nibblemap[(bh->b_data[j]>>4) & 0xf];
-       }
+       __u32 sum = 0;
+       unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);
 
-       if (numblocks==0 || !(bh=map[numblocks-1]))
-               return(0);
-       i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2;
-       for (j=0; j<i; j++) {
-               sum += nibblemap[bh->b_data[j] & 0xf]
-                       + nibblemap[(bh->b_data[j]>>4) & 0xf];
+       while (blocks--) {
+               unsigned words = blocksize / 2;
+               __u16 *p = (__u16 *)(*map++)->b_data;
+               while (words--)
+                       sum += 16 - hweight16(*p++);
        }
 
-       i = numbits%16;
-       if (i!=0) {
-               i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1);
-               sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf];
-               sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf];
-       }
-       return(sum);
+       return sum;
 }
 
 void minix_free_block(struct inode *inode, unsigned long block)
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode)
        return 0;
 }
 
-unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
+unsigned long minix_count_free_blocks(struct super_block *sb)
 {
-       return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
-               sbi->s_nzones - sbi->s_firstdatazone + 1)
+       struct minix_sb_info *sbi = minix_sb(sb);
+       u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
+
+       return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
                << sbi->s_log_zone_size);
 }
 
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
        return inode;
 }
 
-unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
+unsigned long minix_count_free_inodes(struct super_block *sb)
 {
-       return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
+       struct minix_sb_info *sbi = minix_sb(sb);
+       u32 bits = sbi->s_ninodes + 1;
+
+       return count_free(sbi->s_imap, sb->s_blocksize, bits);
 }
index 64cdcd662ffccca98fecad85103f10a2348a76bc..4d46a6a5907052de73638a347a9aa4238fa2bb37 100644 (file)
@@ -263,6 +263,26 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                goto out_no_root;
        }
 
+       /* Apparently minix can create filesystems that allocate more blocks for
+        * the bitmaps than needed.  We simply ignore that, but verify it didn't
+        * create one with not enough blocks and bail out if so.
+        */
+       block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
+       if (sbi->s_imap_blocks < block) {
+               printk("MINIX-fs: file system does not have enough "
+                               "imap blocks allocated.  Refusing to mount\n");
+               goto out_iput;
+       }
+
+       block = minix_blocks_needed(
+                       (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
+                       s->s_blocksize);
+       if (sbi->s_zmap_blocks < block) {
+               printk("MINIX-fs: file system does not have enough "
+                               "zmap blocks allocated.  Refusing to mount.\n");
+               goto out_iput;
+       }
+
        ret = -ENOMEM;
        s->s_root = d_alloc_root(root_inode);
        if (!s->s_root)
@@ -276,9 +296,10 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
        if (!(sbi->s_mount_state & MINIX_VALID_FS))
                printk("MINIX-fs: mounting unchecked file system, "
                        "running fsck is recommended\n");
-       else if (sbi->s_mount_state & MINIX_ERROR_FS)
+       else if (sbi->s_mount_state & MINIX_ERROR_FS)
                printk("MINIX-fs: mounting file system with errors, "
                        "running fsck is recommended\n");
+
        return 0;
 
 out_iput:
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_type = sb->s_magic;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
-       buf->f_bfree = minix_count_free_blocks(sbi);
+       buf->f_bfree = minix_count_free_blocks(sb);
        buf->f_bavail = buf->f_bfree;
        buf->f_files = sbi->s_ninodes;
-       buf->f_ffree = minix_count_free_inodes(sbi);
+       buf->f_ffree = minix_count_free_inodes(sb);
        buf->f_namelen = sbi->s_namelen;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
index 341e2122879a7604611426d4f702f211ee3ed59a..26bbd55e82ea2ab42f61fa2898fd45fee17f7074 100644 (file)
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
 extern struct inode * minix_new_inode(const struct inode *, int, int *);
 extern void minix_free_inode(struct inode * inode);
-extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
+extern unsigned long minix_count_free_inodes(struct super_block *sb);
 extern int minix_new_block(struct inode * inode);
 extern void minix_free_block(struct inode *inode, unsigned long block);
-extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
+extern unsigned long minix_count_free_blocks(struct super_block *sb);
 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
 
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
        return list_entry(inode, struct minix_inode_info, vfs_inode);
 }
 
+static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
+{
+       return DIV_ROUND_UP(bits, blocksize * 8);
+}
+
 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
        defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
 
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
        if (!size)
                return 0;
 
-       size = (size >> 4) + ((size & 15) > 0);
+       size >>= 4;
        while (*p++ == 0xffff) {
                if (--size == 0)
                        return (p - addr) << 4;
index e5e1c7d1839b791f0c52428a9ffe058bd7ded092..cfc6d4448aa54bdc538131b1e53285a92bb20073 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -2483,11 +2480,43 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
                __mnt_make_longterm(mnt);
                new_ns->root = mnt;
                list_add(&new_ns->list, &new_ns->root->mnt_list);
+       } else {
+               mntput(mnt);
        }
        return new_ns;
 }
 EXPORT_SYMBOL(create_mnt_ns);
 
+struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+{
+       struct mnt_namespace *ns;
+       struct super_block *s;
+       struct path path;
+       int err;
+
+       ns = create_mnt_ns(mnt);
+       if (IS_ERR(ns))
+               return ERR_CAST(ns);
+
+       err = vfs_path_lookup(mnt->mnt_root, mnt,
+                       name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
+
+       put_mnt_ns(ns);
+
+       if (err)
+               return ERR_PTR(err);
+
+       /* trade a vfsmount reference for active sb one */
+       s = path.mnt->mnt_sb;
+       atomic_inc(&s->s_active);
+       mntput(path.mnt);
+       /* lock the sucker */
+       down_write(&s->s_umount);
+       /* ... and return the root of (sub)tree on it */
+       return path.dentry;
+}
+EXPORT_SYMBOL(mount_subtree);
+
 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
                char __user *, type, unsigned long, flags, void __user *, data)
 {
@@ -2744,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
        }
 }
 EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index 5b5fa33b6b9dfd0384ca0cd7654363d8e2db560c..cbd1a61c110a38ca7a3e1d1e2de8b237e810b92e 100644 (file)
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 
        error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
        if (error)
-               goto out_bdi;
+               goto out_fput;
 
        server->ncp_filp = ncp_filp;
        server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                error = -EBADF;
                server->info_filp = fget(data.info_fd);
                if (!server->info_filp)
-                       goto out_fput;
+                       goto out_bdi;
                error = -ENOTSOCK;
                sock_inode = server->info_filp->f_path.dentry->d_inode;
                if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
 out_fput2:
        if (server->info_filp)
                fput(server->info_filp);
-out_fput:
-       bdi_destroy(&server->bdi);
 out_bdi:
+       bdi_destroy(&server->bdi);
+out_fput:
        /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
         * 
         * The previously used put_filp(ncp_filp); was bogus, since
index b238d95ac48c7b926ff1e02eae157ea09fa58a68..ac289909814768a626ad817dbf40627805e7899e 100644 (file)
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
                                res = NULL;
                                goto out;
                        /* This turned out not to be a regular file */
+                       case -EISDIR:
                        case -ENOTDIR:
                                goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
-                       /* case -EISDIR: */
                        /* case -EINVAL: */
                        default:
                                res = ERR_CAST(inode);
index 0a1f8312b4dcf0fe9272310f1ab6d00178cdd2a8..606ef0f20aed58d7e67b33ab2696382ece03c6f5 100644 (file)
 
 #define NFSDBG_FACILITY                NFSDBG_FILE
 
-static int nfs_file_open(struct inode *, struct file *);
-static int nfs_file_release(struct inode *, struct file *);
-static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
-static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
-static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
-                                       struct pipe_inode_info *pipe,
-                                       size_t count, unsigned int flags);
-static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos);
-static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
-                                       struct file *filp, loff_t *ppos,
-                                       size_t count, unsigned int flags);
-static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos);
-static int  nfs_file_flush(struct file *, fl_owner_t id);
-static int  nfs_file_fsync(struct file *, loff_t, loff_t, int datasync);
-static int nfs_check_flags(int flags);
-static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
-static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
-
 static const struct vm_operations_struct nfs_file_vm_ops;
 
-const struct file_operations nfs_file_operations = {
-       .llseek         = nfs_file_llseek,
-       .read           = do_sync_read,
-       .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
-       .mmap           = nfs_file_mmap,
-       .open           = nfs_file_open,
-       .flush          = nfs_file_flush,
-       .release        = nfs_file_release,
-       .fsync          = nfs_file_fsync,
-       .lock           = nfs_lock,
-       .flock          = nfs_flock,
-       .splice_read    = nfs_file_splice_read,
-       .splice_write   = nfs_file_splice_write,
-       .check_flags    = nfs_check_flags,
-       .setlease       = nfs_setlease,
-};
-
 const struct inode_operations nfs_file_inode_operations = {
        .permission     = nfs_permission,
        .getattr        = nfs_getattr,
@@ -187,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
         * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
-       if (origin != SEEK_SET || origin != SEEK_CUR) {
+       if (origin != SEEK_SET && origin != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;
 
                int retval = nfs_revalidate_file_size(inode, filp);
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
                        file->f_path.dentry->d_name.name, arg);
        return -EINVAL;
 }
+
+const struct file_operations nfs_file_operations = {
+       .llseek         = nfs_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .aio_read       = nfs_file_read,
+       .aio_write      = nfs_file_write,
+       .mmap           = nfs_file_mmap,
+       .open           = nfs_file_open,
+       .flush          = nfs_file_flush,
+       .release        = nfs_file_release,
+       .fsync          = nfs_file_fsync,
+       .lock           = nfs_lock,
+       .flock          = nfs_flock,
+       .splice_read    = nfs_file_splice_read,
+       .splice_write   = nfs_file_splice_write,
+       .check_flags    = nfs_check_flags,
+       .setlease       = nfs_setlease,
+};
+
+#ifdef CONFIG_NFS_V4
+static int
+nfs4_file_open(struct inode *inode, struct file *filp)
+{
+       /*
+        * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
+        * this point, then something is very wrong
+        */
+       dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
+       return -ENOTDIR;
+}
+
+const struct file_operations nfs4_file_operations = {
+       .llseek         = nfs_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .aio_read       = nfs_file_read,
+       .aio_write      = nfs_file_write,
+       .mmap           = nfs_file_mmap,
+       .open           = nfs4_file_open,
+       .flush          = nfs_file_flush,
+       .release        = nfs_file_release,
+       .fsync          = nfs_file_fsync,
+       .lock           = nfs_lock,
+       .flock          = nfs_flock,
+       .splice_read    = nfs_file_splice_read,
+       .splice_write   = nfs_file_splice_write,
+       .check_flags    = nfs_check_flags,
+       .setlease       = nfs_setlease,
+};
+#endif /* CONFIG_NFS_V4 */
index c07a55aec83867ee1489ff29134862bc891dfa82..50a15fa8cf985e41e21b786f4f6d9d9ed96cf37e 100644 (file)
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                 */
                inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
                if (S_ISREG(inode->i_mode)) {
-                       inode->i_fop = &nfs_file_operations;
+                       inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
                        inode->i_data.a_ops = &nfs_file_aops;
                        inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
                } else if (S_ISDIR(inode->i_mode)) {
index c1a1bd8ddf1cbecfbba4f375fec6c78d5d36ae60..3f4d95751d52f3e152fc7f2dcb5d1a6d8fc2184b 100644 (file)
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
                struct list_head *head);
 
+extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+               struct inode *inode);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_readdata_release(struct nfs_read_data *rdata);
 
index 85f1690ca08c110bcd1c1754e564f23619295a1a..d4bc9ed917484106dbed777942e0db096be08236 100644 (file)
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
        .dentry_ops     = &nfs_dentry_operations,
        .dir_inode_ops  = &nfs3_dir_inode_operations,
        .file_inode_ops = &nfs3_file_inode_operations,
+       .file_ops       = &nfs_file_operations,
        .getroot        = nfs3_proc_get_root,
        .getattr        = nfs3_proc_getattr,
        .setattr        = nfs3_proc_setattr,
index b60fddf606f7d23e6f01305420446d74102001a7..d9f4d78c34131264ba1b58bd36b1ec3cd35e5d6d 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ out:
 
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+       if (delegation == NULL)
+               return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
        if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                rcu_read_lock();
                delegation = rcu_dereference(nfsi->delegation);
-               if (delegation == NULL ||
-                   !can_open_delegated(delegation, fmode)) {
+               if (!can_open_delegated(delegation, fmode)) {
                        rcu_read_unlock();
                        break;
                }
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
                if (delegation)
                        delegation_flags = delegation->flags;
                rcu_read_unlock();
-               if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+                       pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+                                       "returning a delegation for "
+                                       "OPEN(CLAIM_DELEGATE_CUR)\n",
+                                       NFS_CLIENT(inode)->cl_server);
+               } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                        nfs_inode_set_delegation(state->inode,
                                        data->owner->so_cred,
                                        &data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                        goto out_no_action;
                rcu_read_lock();
                delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-               if (delegation != NULL &&
-                   test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-                       rcu_read_unlock();
-                       goto out_no_action;
-               }
+               if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+                   can_open_delegated(delegation, data->o_arg.fmode))
+                       goto unlock_no_action;
                rcu_read_unlock();
        }
        /* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                return;
        rpc_call_start(task);
        return;
+unlock_no_action:
+       rcu_read_unlock();
 out_no_action:
        task->tk_action = NULL;
 
@@ -2464,8 +2472,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
                case -NFS4ERR_BADNAME:
                        return -ENOENT;
                case -NFS4ERR_MOVED:
-                       err = nfs4_get_referral(dir, name, fattr, fhandle);
-                       break;
+                       return nfs4_get_referral(dir, name, fattr, fhandle);
                case -NFS4ERR_WRONGSEC:
                        nfs_fixup_secinfo_attributes(fattr, fhandle);
                }
@@ -6253,6 +6260,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
        .dentry_ops     = &nfs4_dentry_operations,
        .dir_inode_ops  = &nfs4_dir_inode_operations,
        .file_inode_ops = &nfs4_file_inode_operations,
+       .file_ops       = &nfs4_file_operations,
        .getroot        = nfs4_proc_get_root,
        .getattr        = nfs4_proc_getattr,
        .setattr        = nfs4_proc_setattr,
index 39914be40b03694008ada2c56af6aaf5fb3a7f97..6a7107ae6b72d407bf95c4953bd1017e4704f63f 100644 (file)
@@ -1156,11 +1156,13 @@ restart:
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
+                               spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk("%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
+                               spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
                                goto restart;
                        }
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
+       spin_lock(&state->state_lock);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
+       spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
        switch (error) {
+               case 0:
+                       break;
                case -NFS4ERR_CB_PATH_DOWN:
                        nfs_handle_cb_pathdown(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_end_reclaim_reboot(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
-                       return 0;
+                       break;
                case -EKEYEXPIRED:
                        /* Nothing we can do */
                        nfs4_warn_keyexpired(clp->cl_hostname);
-                       return 0;
+                       break;
+               default:
+                       return error;
        }
-       return error;
+       return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
-       int status = -NFS4ERR_EXPIRED;
+       int status;
 
        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
+               status = -ENOKEY;
                if (cred == NULL)
                        goto out;
        }
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
-       else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+       if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
-       else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+       if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
+                       if (status < 0)
+                               goto out_error;
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
-                       if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
-                               goto out_error;
                }
 
                /* Initialize or reset the session */
index baf73536bc048e2085fabde5c2f523a6fbc3b523..8e672a2b2d693193e8ca7252d70c73578d739e0f 100644 (file)
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
 
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+       struct nfs_pageio_descriptor pgio;
+
+       put_lseg(data->lseg);
+       data->lseg = NULL;
+       dprintk("pnfs write error = %d\n", data->pnfs_error);
+
+       nfs_pageio_init_read_mds(&pgio, data->inode);
+
+       while (!list_empty(&data->pages)) {
+               struct nfs_page *req = nfs_list_entry(data->pages.next);
+
+               nfs_list_remove_request(req);
+               nfs_pageio_add_request(&pgio, req);
+       }
+       nfs_pageio_complete(&pgio);
+}
+
 /*
  * Called by non rpc-based layout drivers
  */
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data)
        if (likely(!data->pnfs_error)) {
                __nfs4_read_done_cb(data);
                data->mds_ops->rpc_call_done(&data->task, data);
-       } else {
-               put_lseg(data->lseg);
-               data->lseg = NULL;
-               dprintk("pnfs write error = %d\n", data->pnfs_error);
-       }
+       } else
+               pnfs_ld_handle_read_error(data);
        data->mds_ops->rpc_release(data);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
index ac40b8535d7e0e7493f13063afdbe57169837c9e..f48125da198a2d5a50bc0861805212a129427920 100644 (file)
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
        .dentry_ops     = &nfs_dentry_operations,
        .dir_inode_ops  = &nfs_dir_inode_operations,
        .file_inode_ops = &nfs_file_inode_operations,
+       .file_ops       = &nfs_file_operations,
        .getroot        = nfs_proc_get_root,
        .getattr        = nfs_proc_getattr,
        .setattr        = nfs_proc_setattr,
index 8b48ec63f7229de8cb9435cfe82cc9d9876e6268..cfa175c223dcfa5b79ebf17b3d609649fbd7188d 100644 (file)
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
        }
 }
 
-static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
                struct inode *inode)
 {
        nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 static void nfs_readpage_release_full(void *calldata)
 {
        struct nfs_read_data *data = calldata;
-       struct nfs_pageio_descriptor pgio;
 
-       if (data->pnfs_error) {
-               nfs_pageio_init_read_mds(&pgio, data->inode);
-               pgio.pg_recoalesce = 1;
-       }
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);
 
                nfs_list_remove_request(req);
-               if (!data->pnfs_error)
-                       nfs_readpage_release(req);
-               else
-                       nfs_pageio_add_request(&pgio, req);
+               nfs_readpage_release(req);
        }
-       if (data->pnfs_error)
-               nfs_pageio_complete(&pgio);
        nfs_readdata_release(calldata);
 }
 
index 480b3b6bf71ef82f122a433cbf3ec62d789c033a..134777406ee31938271db7ad0af8c8cb444e6fc9 100644 (file)
@@ -2787,43 +2787,18 @@ static void nfs_referral_loop_unprotect(void)
 static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
                const char *export_path)
 {
-       struct mnt_namespace *ns_private;
-       struct super_block *s;
        struct dentry *dentry;
-       struct path path;
-       int ret;
-
-       ns_private = create_mnt_ns(root_mnt);
-       ret = PTR_ERR(ns_private);
-       if (IS_ERR(ns_private))
-               goto out_mntput;
-
-       ret = nfs_referral_loop_protect();
-       if (ret != 0)
-               goto out_put_mnt_ns;
+       int ret = nfs_referral_loop_protect();
 
-       ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
-                       export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
+       if (ret) {
+               mntput(root_mnt);
+               return ERR_PTR(ret);
+       }
 
+       dentry = mount_subtree(root_mnt, export_path);
        nfs_referral_loop_unprotect();
-       put_mnt_ns(ns_private);
-
-       if (ret != 0)
-               goto out_err;
-
-       s = path.mnt->mnt_sb;
-       atomic_inc(&s->s_active);
-       dentry = dget(path.dentry);
 
-       path_put(&path);
-       down_write(&s->s_umount);
        return dentry;
-out_put_mnt_ns:
-       put_mnt_ns(ns_private);
-out_mntput:
-       mntput(root_mnt);
-out_err:
-       return ERR_PTR(ret);
 }
 
 static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
index 41d6743d303c2a7923e5d09bc1df1fcf41514c29..ac258beeda3c4e3e9a3a36a375619838c8c7a350 100644 (file)
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
                if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
                        goto out_free;
 
+               if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+                       goto out_free;
+
                len = argv[n].v_size * argv[n].v_nmembs;
                base = (void __user *)(unsigned long)argv[n].v_base;
                if (len == 0) {
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
+       case NILFS_IOCTL_CHANGE_CPMODE:
+       case NILFS_IOCTL_DELETE_CHECKPOINT:
+       case NILFS_IOCTL_GET_CPINFO:
+       case NILFS_IOCTL_GET_CPSTAT:
+       case NILFS_IOCTL_GET_SUINFO:
+       case NILFS_IOCTL_GET_SUSTAT:
+       case NILFS_IOCTL_GET_VINFO:
+       case NILFS_IOCTL_GET_BDESCS:
+       case NILFS_IOCTL_CLEAN_SEGMENTS:
+       case NILFS_IOCTL_SYNC:
+       case NILFS_IOCTL_RESIZE:
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index ed553c60de827e0ebad24e3501e0e00d21c82cfc..3165aebb43c87934b743ecf08e5f02cef586d771 100644 (file)
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
                                           OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
-               goto out;
+               goto out_commit;
        }
 
        dquot_free_space_nodirty(inode,
index c1efe939c774e2c9b909892f6c95434d6da760ee..78b68af3b0e32627b1874277d8ae58003501acb5 100644 (file)
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
        }
 
        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+               /*
+                * Unlock the page and cycle ip_alloc_sem so that we don't
+                * busyloop waiting for ip_alloc_sem to unlock
+                */
                ret = AOP_TRUNCATED_PAGE;
+               unlock_page(page);
+               unlock = 0;
+               down_read(&oi->ip_alloc_sem);
+               up_read(&oi->ip_alloc_sem);
                goto out_inode_unlock;
        }
 
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int level;
+       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
 
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        if (ocfs2_iocb_is_sem_locked(iocb))
                ocfs2_iocb_clear_sem_locked(iocb);
 
+       if (ocfs2_iocb_is_unaligned_aio(iocb)) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
+
+               if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
+                   waitqueue_active(wq)) {
+                       wake_up_all(wq);
+               }
+       }
+
        ocfs2_iocb_clear_rw_locked(iocb);
 
        level = ocfs2_iocb_rw_locked_level(iocb);
@@ -862,6 +880,12 @@ struct ocfs2_write_ctxt {
        struct page                     *w_pages[OCFS2_MAX_CTXT_PAGES];
        struct page                     *w_target_page;
 
+       /*
+        * w_target_locked is used for page_mkwrite path indicating no unlocking
+        * against w_target_page in ocfs2_write_end_nolock.
+        */
+       unsigned int                    w_target_locked:1;
+
        /*
         * ocfs2_write_end() uses this to know what the real range to
         * write in the target should be.
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
 
 static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
 {
+       int i;
+
+       /*
+        * w_target_locked is only set to true in the page_mkwrite() case.
+        * The intent is to allow us to lock the target page from write_begin()
+        * to write_end(). The caller must hold a ref on w_target_page.
+        */
+       if (wc->w_target_locked) {
+               BUG_ON(!wc->w_target_page);
+               for (i = 0; i < wc->w_num_pages; i++) {
+                       if (wc->w_target_page == wc->w_pages[i]) {
+                               wc->w_pages[i] = NULL;
+                               break;
+                       }
+               }
+               mark_page_accessed(wc->w_target_page);
+               page_cache_release(wc->w_target_page);
+       }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
 
        brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                         */
                        lock_page(mmap_page);
 
+                       /* Exit and let the caller retry */
                        if (mmap_page->mapping != mapping) {
+                               WARN_ON(mmap_page->mapping);
                                unlock_page(mmap_page);
-                               /*
-                                * Sanity check - the locking in
-                                * ocfs2_pagemkwrite() should ensure
-                                * that this code doesn't trigger.
-                                */
-                               ret = -EINVAL;
-                               mlog_errno(ret);
+                               ret = -EAGAIN;
                                goto out;
                        }
 
                        page_cache_get(mmap_page);
                        wc->w_pages[i] = mmap_page;
+                       wc->w_target_locked = true;
                } else {
                        wc->w_pages[i] = find_or_create_page(mapping, index,
                                                             GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                        wc->w_target_page = wc->w_pages[i];
        }
 out:
+       if (ret)
+               wc->w_target_locked = false;
        return ret;
 }
 
@@ -1817,11 +1858,23 @@ try_again:
         */
        ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                                         cluster_of_pages, mmap_page);
-       if (ret) {
+       if (ret && ret != -EAGAIN) {
                mlog_errno(ret);
                goto out_quota;
        }
 
+       /*
+        * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+        * the target page. In this case, we exit with no error and no target
+        * page. This will trigger the caller, page_mkwrite(), to re-try
+        * the operation.
+        */
+       if (ret == -EAGAIN) {
+               BUG_ON(wc->w_target_page);
+               ret = 0;
+               goto out_quota;
+       }
+
        ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
                                          len);
        if (ret) {
index 75cf3ad987a66d911c15234a803243185ccc5a94..ffb2da370a99d05dd4b919fc64a5483dbc2df7a3 100644 (file)
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
        OCFS2_IOCB_RW_LOCK = 0,
        OCFS2_IOCB_RW_LOCK_LEVEL,
        OCFS2_IOCB_SEM,
+       OCFS2_IOCB_UNALIGNED_IO,
        OCFS2_IOCB_NUM_LOCKS
 };
 
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
        clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
 #define ocfs2_iocb_is_sem_locked(iocb) \
        test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+
+#define ocfs2_iocb_set_unaligned_aio(iocb) \
+       set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_unaligned_aio(iocb) \
+       clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_unaligned_aio(iocb) \
+       test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+
+#define OCFS2_IOEND_WQ_HASH_SZ 37
+#define ocfs2_ioend_wq(v)   (&ocfs2__ioend_wq[((unsigned long)(v)) %\
+                                           OCFS2_IOEND_WQ_HASH_SZ])
+extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
 #endif /* OCFS2_FILE_H */
index 9a3e6bbff27bd4839b487c2c14282fd9ef1a4675..a4e855e3690e6ab844d37788b71649975321cb19 100644 (file)
@@ -216,6 +216,7 @@ struct o2hb_region {
 
        struct list_head        hr_all_item;
        unsigned                hr_unclean_stop:1,
+                               hr_aborted_start:1,
                                hr_item_pinned:1,
                                hr_item_dropped:1;
 
@@ -254,6 +255,10 @@ struct o2hb_region {
         * a more complete api that doesn't lead to this sort of fragility. */
        atomic_t                hr_steady_iterations;
 
+       /* terminate o2hb thread if it does not reach steady state
+        * (hr_steady_iterations == 0) within hr_unsteady_iterations */
+       atomic_t                hr_unsteady_iterations;
+
        char                    hr_dev_name[BDEVNAME_SIZE];
 
        unsigned int            hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
 
 static void o2hb_arm_write_timeout(struct o2hb_region *reg)
 {
+       /* Arm writeout only after thread reaches steady state */
+       if (atomic_read(&reg->hr_steady_iterations) != 0)
+               return;
+
        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
             O2HB_MAX_WRITE_TIMEOUT_MS);
 
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
        return read == computed;
 }
 
-/* We want to make sure that nobody is heartbeating on top of us --
- * this will help detect an invalid configuration. */
-static void o2hb_check_last_timestamp(struct o2hb_region *reg)
+/*
+ * Compare the slot data with what we wrote in the last iteration.
+ * If the match fails, print an appropriate error message. This is to
+ * detect errors like... another node hearting on the same slot,
+ * flaky device that is losing writes, etc.
+ * Returns 1 if check succeeds, 0 otherwise.
+ */
+static int o2hb_check_own_slot(struct o2hb_region *reg)
 {
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
        slot = &reg->hr_slots[o2nm_this_node()];
        /* Don't check on our 1st timestamp */
        if (!slot->ds_last_time)
-               return;
+               return 0;
 
        hb_block = slot->ds_raw_block;
        if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
            le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
            hb_block->hb_node == slot->ds_node_num)
-               return;
+               return 1;
 
 #define ERRSTR1                "Another node is heartbeating on device"
 #define ERRSTR2                "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
             (unsigned long long)slot->ds_last_time, hb_block->hb_node,
             (unsigned long long)le64_to_cpu(hb_block->hb_generation),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq));
+
+       return 0;
 }
 
 static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
        o2nm_node_put(node);
 }
 
-static void o2hb_set_quorum_device(struct o2hb_region *reg,
-                                  struct o2hb_disk_slot *slot)
+static void o2hb_set_quorum_device(struct o2hb_region *reg)
 {
-       assert_spin_locked(&o2hb_live_lock);
-
        if (!o2hb_global_heartbeat_active())
                return;
 
-       if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+       /* Prevent race with o2hb_heartbeat_group_drop_item() */
+       if (kthread_should_stop())
+               return;
+
+       /* Tag region as quorum only after thread reaches steady state */
+       if (atomic_read(&reg->hr_steady_iterations) != 0)
                return;
 
+       spin_lock(&o2hb_live_lock);
+
+       if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+               goto unlock;
+
        /*
         * A region can be added to the quorum only when it sees all
         * live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
         */
        if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
                   sizeof(o2hb_live_node_bitmap)))
-               return;
-
-       if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
-               return;
+               goto unlock;
 
-       printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
-              config_item_name(&reg->hr_item));
+       printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
+              config_item_name(&reg->hr_item), reg->hr_dev_name);
 
        set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
 
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
        if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
                o2hb_region_unpin(NULL);
+unlock:
+       spin_unlock(&o2hb_live_lock);
 }
 
 static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
                slot->ds_equal_samples = 0;
        }
 out:
-       o2hb_set_quorum_device(reg, slot);
-
        spin_unlock(&o2hb_live_lock);
 
        o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
 
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 {
-       int i, ret, highest_node, change = 0;
+       int i, ret, highest_node;
+       int membership_change = 0, own_slot_ok = 0;
        unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
                                       sizeof(configured_nodes));
        if (ret) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        /*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 
        highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
        if (highest_node >= O2NM_MAX_NODES) {
-               mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
-               return -EINVAL;
+               mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
+               ret = -EINVAL;
+               goto bail;
        }
 
        /* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
        ret = o2hb_read_slots(reg, highest_node + 1);
        if (ret < 0) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        /* With an up to date view of the slots, we can check that no
         * other node has been improperly configured to heartbeat in
         * our slot. */
-       o2hb_check_last_timestamp(reg);
+       own_slot_ok = o2hb_check_own_slot(reg);
 
        /* fill in the proper info for our next heartbeat */
        o2hb_prepare_block(reg, reg->hr_generation);
 
-       /* And fire off the write. Note that we don't wait on this I/O
-        * until later. */
        ret = o2hb_issue_node_write(reg, &write_wc);
        if (ret < 0) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        i = -1;
        while((i = find_next_bit(configured_nodes,
                                 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
-               change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+               membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
        }
 
        /*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
                 * disk */
                mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
                     write_wc.wc_error, reg->hr_dev_name);
-               return write_wc.wc_error;
+               ret = write_wc.wc_error;
+               goto bail;
        }
 
-       o2hb_arm_write_timeout(reg);
+       /* Skip disarming the timeout if own slot has stale/bad data */
+       if (own_slot_ok) {
+               o2hb_set_quorum_device(reg);
+               o2hb_arm_write_timeout(reg);
+       }
 
+bail:
        /* let the person who launched us know when things are steady */
-       if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
-               if (atomic_dec_and_test(&reg->hr_steady_iterations))
+       if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               if (!ret && own_slot_ok && !membership_change) {
+                       if (atomic_dec_and_test(&reg->hr_steady_iterations))
+                               wake_up(&o2hb_steady_queue);
+               }
+       }
+
+       if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
+                       printk(KERN_NOTICE "o2hb: Unable to stabilize "
+                              "heartbeart on region %s (%s)\n",
+                              config_item_name(&reg->hr_item),
+                              reg->hr_dev_name);
+                       atomic_set(&reg->hr_steady_iterations, 0);
+                       reg->hr_aborted_start = 1;
                        wake_up(&o2hb_steady_queue);
+                       ret = -EIO;
+               }
        }
 
-       return 0;
+       return ret;
 }
 
 /* Subtract b from a, storing the result in a. a *must* have a larger
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
        /* Pin node */
        o2nm_depend_this_node();
 
-       while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+       while (!kthread_should_stop() &&
+              !reg->hr_unclean_stop && !reg->hr_aborted_start) {
                /* We track the time spent inside
                 * o2hb_do_disk_heartbeat so that we avoid more than
                 * hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
                 * likely to time itself out. */
                do_gettimeofday(&before_hb);
 
-               i = 0;
-               do {
-                       ret = o2hb_do_disk_heartbeat(reg);
-               } while (ret && ++i < 2);
+               ret = o2hb_do_disk_heartbeat(reg);
 
                do_gettimeofday(&after_hb);
                elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
                     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
                     elapsed_msec);
 
-               if (elapsed_msec < reg->hr_timeout_ms) {
+               if (!kthread_should_stop() &&
+                   elapsed_msec < reg->hr_timeout_ms) {
                        /* the kthread api has blocked signals for us so no
                         * need to record the return value. */
                        msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
         * to timeout on this region when we could just as easily
         * write a clear generation - thus indicating to them that
         * this node has left this region.
-        *
-        * XXX: Should we skip this on unclean_stop? */
-       o2hb_prepare_block(reg, 0);
-       ret = o2hb_issue_node_write(reg, &write_wc);
-       if (ret == 0) {
-               o2hb_wait_on_io(reg, &write_wc);
-       } else {
-               mlog_errno(ret);
+        */
+       if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
+               o2hb_prepare_block(reg, 0);
+               ret = o2hb_issue_node_write(reg, &write_wc);
+               if (ret == 0)
+                       o2hb_wait_on_io(reg, &write_wc);
+               else
+                       mlog_errno(ret);
        }
 
        /* Unpin node */
        o2nm_undepend_this_node();
 
-       mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+       mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
 
        return 0;
 }
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
        struct o2hb_debug_buf *db = inode->i_private;
        struct o2hb_region *reg;
        unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       unsigned long lts;
        char *buf = NULL;
        int i = -1;
        int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
 
        case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
                reg = (struct o2hb_region *)db->db_data;
-               out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
-                               jiffies_to_msecs(jiffies -
-                                                reg->hr_last_timeout_start));
+               lts = reg->hr_last_timeout_start;
+               /* If 0, it has never been set before */
+               if (lts)
+                       lts = jiffies_to_msecs(jiffies - lts);
+               out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
                goto done;
 
        case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
        struct page *page;
        struct o2hb_region *reg = to_o2hb_region(item);
 
+       mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
+
        if (reg->hr_tmp_block)
                kfree(reg->hr_tmp_block);
 
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                        live_threshold <<= 1;
                spin_unlock(&o2hb_live_lock);
        }
-       atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
+       ++live_threshold;
+       atomic_set(&reg->hr_steady_iterations, live_threshold);
+       /* unsteady_iterations is double the steady_iterations */
+       atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
 
        hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
                              reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        ret = wait_event_interruptible(o2hb_steady_queue,
                                atomic_read(&reg->hr_steady_iterations) == 0);
        if (ret) {
-               /* We got interrupted (hello ptrace!).  Clean up */
-               spin_lock(&o2hb_live_lock);
-               hb_task = reg->hr_task;
-               reg->hr_task = NULL;
-               spin_unlock(&o2hb_live_lock);
+               atomic_set(&reg->hr_steady_iterations, 0);
+               reg->hr_aborted_start = 1;
+       }
 
-               if (hb_task)
-                       kthread_stop(hb_task);
+       if (reg->hr_aborted_start) {
+               ret = -EIO;
                goto out;
        }
 
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                ret = -EIO;
 
        if (hb_task && o2hb_global_heartbeat_active())
-               printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
-                      config_item_name(&reg->hr_item));
+               printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
+                      config_item_name(&reg->hr_item), reg->hr_dev_name);
 
 out:
        if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
 
        /* stop the thread when the user removes the region dir */
        spin_lock(&o2hb_live_lock);
-       if (o2hb_global_heartbeat_active()) {
-               clear_bit(reg->hr_region_num, o2hb_region_bitmap);
-               clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
-               if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
-                       quorum_region = 1;
-               clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
-       }
        hb_task = reg->hr_task;
        reg->hr_task = NULL;
        reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
        if (hb_task)
                kthread_stop(hb_task);
 
+       if (o2hb_global_heartbeat_active()) {
+               spin_lock(&o2hb_live_lock);
+               clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+               clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+               if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+                       quorum_region = 1;
+               clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+               spin_unlock(&o2hb_live_lock);
+               printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
+                      ((atomic_read(&reg->hr_steady_iterations) == 0) ?
+                       "stopped" : "start aborted"), config_item_name(item),
+                      reg->hr_dev_name);
+       }
+
        /*
         * If we're racing a dev_write(), we need to wake them.  They will
         * check reg->hr_task
         */
        if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               reg->hr_aborted_start = 1;
                atomic_set(&reg->hr_steady_iterations, 0);
                wake_up(&o2hb_steady_queue);
        }
 
-       if (o2hb_global_heartbeat_active())
-               printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
-                      config_item_name(&reg->hr_item));
-
        config_item_put(item);
 
        if (!o2hb_global_heartbeat_active() || !quorum_region)
index 3a5835904b3db4d522c561908171f528bb74f6bd..dc45deb19e6885e56a1f5be46cbd39444c46f810 100644 (file)
@@ -47,6 +47,7 @@
 #define SC_DEBUG_NAME          "sock_containers"
 #define NST_DEBUG_NAME         "send_tracking"
 #define STATS_DEBUG_NAME       "stats"
+#define NODES_DEBUG_NAME       "connected_nodes"
 
 #define SHOW_SOCK_CONTAINERS   0
 #define SHOW_SOCK_STATS                1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
 static struct dentry *sc_dentry;
 static struct dentry *nst_dentry;
 static struct dentry *stats_dentry;
+static struct dentry *nodes_dentry;
 
 static DEFINE_SPINLOCK(o2net_debug_lock);
 
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
        .release = sc_fop_release,
 };
 
-int o2net_debugfs_init(void)
+static int o2net_fill_bitmap(char *buf, int len)
 {
-       o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
-       if (!o2net_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       int i = -1, out = 0;
 
-       nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                        o2net_dentry, NULL,
-                                        &nst_seq_fops);
-       if (!nst_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       o2net_fill_node_map(map, sizeof(map));
 
-       sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                       o2net_dentry, NULL,
-                                       &sc_seq_fops);
-       if (!sc_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+               out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+       out += snprintf(buf + out, PAGE_SIZE - out, "\n");
 
-       stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                          o2net_dentry, NULL,
-                                          &stats_seq_fops);
-       if (!stats_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       return out;
+}
+
+static int nodes_fop_open(struct inode *inode, struct file *file)
+{
+       char *buf;
+
+       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
+
+       file->private_data = buf;
 
        return 0;
-bail:
-       debugfs_remove(stats_dentry);
-       debugfs_remove(sc_dentry);
-       debugfs_remove(nst_dentry);
-       debugfs_remove(o2net_dentry);
-       return -ENOMEM;
 }
 
+static int o2net_debug_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+static ssize_t o2net_debug_read(struct file *file, char __user *buf,
+                               size_t nbytes, loff_t *ppos)
+{
+       return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+                                      i_size_read(file->f_mapping->host));
+}
+
+static const struct file_operations nodes_fops = {
+       .open           = nodes_fop_open,
+       .release        = o2net_debug_release,
+       .read           = o2net_debug_read,
+       .llseek         = generic_file_llseek,
+};
+
 void o2net_debugfs_exit(void)
 {
+       debugfs_remove(nodes_dentry);
        debugfs_remove(stats_dentry);
        debugfs_remove(sc_dentry);
        debugfs_remove(nst_dentry);
        debugfs_remove(o2net_dentry);
 }
 
+int o2net_debugfs_init(void)
+{
+       mode_t mode = S_IFREG|S_IRUSR;
+
+       o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+       if (o2net_dentry)
+               nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &nst_seq_fops);
+       if (nst_dentry)
+               sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &sc_seq_fops);
+       if (sc_dentry)
+               stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &stats_seq_fops);
+       if (stats_dentry)
+               nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &nodes_fops);
+       if (nodes_dentry)
+               return 0;
+
+       o2net_debugfs_exit();
+       mlog_errno(-ENOMEM);
+       return -ENOMEM;
+}
+
 #endif /* CONFIG_DEBUG_FS */
index ad7d0c155de41a3912b5a6fa330b28ca48edf790..044e7b58d31c7662a75e29f636cfb86bf2846b67 100644 (file)
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        }
 
        if (was_valid && !valid) {
-               printk(KERN_NOTICE "o2net: no longer connected to "
+               printk(KERN_NOTICE "o2net: No longer connected to "
                       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
                o2net_complete_nodes_nsw(nn);
        }
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
                cancel_delayed_work(&nn->nn_connect_expired);
                printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
                       o2nm_this_node() > sc->sc_node->nd_num ?
-                               "connected to" : "accepted connection from",
+                      "Connected to" : "Accepted connection from",
                       SC_NODEF_ARGS(sc));
        }
 
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
                        o2net_sc_queue_work(sc, &sc->sc_connect_work);
                        break;
                default:
-                       printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+                       printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
                              " shutdown, state %d\n",
                              SC_NODEF_ARGS(sc), sk->sk_state);
                        o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
        return ret;
 }
 
+/* Get a map of all nodes to which this node is currently connected to */
+void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+       struct o2net_sock_container *sc;
+       int node, ret;
+
+       BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+       memset(map, 0, bytes);
+       for (node = 0; node < O2NM_MAX_NODES; ++node) {
+               o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+               if (!ret) {
+                       set_bit(node, map);
+                       sc_put(sc);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(o2net_fill_node_map);
+
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
                           size_t caller_veclen, u8 target_node, int *status)
 {
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
        if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
-                    "version %llu but %llu is required, disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    (unsigned long long)be64_to_cpu(hand->protocol_version),
-                    O2NET_PROTOCOL_VERSION);
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
+                      "protocol version %llu but %llu is required. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      (unsigned long long)be64_to_cpu(hand->protocol_version),
+                      O2NET_PROTOCOL_VERSION);
 
                /* don't bother reconnecting if its the wrong version. */
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
         */
        if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
                                o2net_idle_timeout()) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2net_idle_timeout_ms),
-                    o2net_idle_timeout());
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
+                      "idle timeout of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2net_idle_timeout_ms),
+                      o2net_idle_timeout());
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
 
        if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
                        o2net_keepalive_delay()) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2net_keepalive_delay_ms),
-                    o2net_keepalive_delay());
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
+                      "delay of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2net_keepalive_delay_ms),
+                      o2net_keepalive_delay());
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
 
        if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
                        O2HB_MAX_WRITE_TIMEOUT_MS) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
-                    O2HB_MAX_WRITE_TIMEOUT_MS);
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
+                      "timeout of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
+                      O2HB_MAX_WRITE_TIMEOUT_MS);
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
 {
        struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-
 #ifdef CONFIG_DEBUG_FS
-       ktime_t now = ktime_get();
+       unsigned long msecs = ktime_to_ms(ktime_get()) -
+               ktime_to_ms(sc->sc_tv_timer);
+#else
+       unsigned long msecs = o2net_idle_timeout();
 #endif
 
-       printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
-            "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
-                    o2net_idle_timeout() / 1000,
-                    o2net_idle_timeout() % 1000);
-
-#ifdef CONFIG_DEBUG_FS
-       mlog(ML_NOTICE, "Here are some times that might help debug the "
-            "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
-            "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
-            (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
-            (long long)ktime_to_us(sc->sc_tv_data_ready),
-            (long long)ktime_to_us(sc->sc_tv_advance_start),
-            (long long)ktime_to_us(sc->sc_tv_advance_stop),
-            sc->sc_msg_key, sc->sc_msg_type,
-            (long long)ktime_to_us(sc->sc_tv_func_start),
-            (long long)ktime_to_us(sc->sc_tv_func_stop));
-#endif
+       printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
+              "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
+              msecs / 1000, msecs % 1000);
 
        /*
         * Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
 
 out:
        if (ret) {
-               mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
-                    "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+               printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
+                      " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
                /* 0 err so that another will be queued and attempted
                 * from set_nn_state */
                if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
 
        spin_lock(&nn->nn_lock);
        if (!nn->nn_sc_valid) {
-               mlog(ML_ERROR, "no connection established with node %u after "
-                    "%u.%u seconds, giving up and returning errors.\n",
+               printk(KERN_NOTICE "o2net: No connection established with "
+                      "node %u after %u.%u seconds, giving up.\n",
                     o2net_num_from_nn(nn),
                     o2net_idle_timeout() / 1000,
                     o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
 
        node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
        if (node == NULL) {
-               mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
-                    &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
+                      "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+                      ntohs(sin.sin_port));
                ret = -EINVAL;
                goto out;
        }
 
        if (o2nm_this_node() >= node->nd_num) {
                local_node = o2nm_get_node_by_num(o2nm_this_node());
-               mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
-                    "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
-                    local_node->nd_name, local_node->nd_num,
-                    &(local_node->nd_ipv4_address),
-                    ntohs(local_node->nd_ipv4_port),
-                    node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
-                    ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
+                      "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+                      "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+                      &(local_node->nd_ipv4_address),
+                      ntohs(local_node->nd_ipv4_port), node->nd_name,
+                      node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
                ret = -EINVAL;
                goto out;
        }
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
                ret = 0;
        spin_unlock(&nn->nn_lock);
        if (ret) {
-               mlog(ML_NOTICE, "attempt to connect from node '%s' at "
-                    "%pI4:%d but it already has an open connection\n",
-                    node->nd_name, &sin.sin_addr.s_addr,
-                    ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
+                      "at %pI4:%d but it already has an open connection\n",
+                      node->nd_name, &sin.sin_addr.s_addr,
+                      ntohs(sin.sin_port));
                goto out;
        }
 
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
 
        ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0) {
-               mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+               printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
                goto out;
        }
 
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
        sock->sk->sk_reuse = 1;
        ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
        if (ret < 0) {
-               mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
-                    "ret=%d\n", &addr, ntohs(port), ret);
+               printk(KERN_ERR "o2net: Error %d while binding socket at "
+                      "%pI4:%u\n", ret, &addr, ntohs(port));
                goto out;
        }
 
        ret = sock->ops->listen(sock, 64);
-       if (ret < 0) {
-               mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
-                    &addr, ntohs(port), ret);
-       }
+       if (ret < 0)
+               printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
+                      ret, &addr, ntohs(port));
 
 out:
        if (ret) {
index fd6179eb26d4cd2cfb43f4ff053237837712113e..5bada2a69b503cd365d626a25e4ae6d08ad50b7a 100644 (file)
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
                           struct list_head *unreg_list);
 void o2net_unregister_handler_list(struct list_head *list);
 
+void o2net_fill_node_map(unsigned long *map, unsigned bytes);
+
 struct o2nm_node;
 int o2net_register_hb_callbacks(void);
 void o2net_unregister_hb_callbacks(void);
index e2878b5895fb543a86c11f0128b025dbb0335c38..8fe4e2892ab9ccd983304a825c338df57a5376b8 100644 (file)
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
                        if (pde)
                                le16_add_cpu(&pde->rec_len,
                                                le16_to_cpu(de->rec_len));
-                       else
-                               de->inode = 0;
+                       de->inode = 0;
                        dir->i_version++;
                        ocfs2_journal_dirty(handle, bh);
                        goto bail;
index d602abb51b610d525cc437daa05c25d2105fe0c3..a5952ceecba5a83147389ad4a1cd24972ee0bfbe 100644 (file)
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
 void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
 
 void dlm_put(struct dlm_ctxt *dlm);
 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
        kref_get(&res->refs);
 }
 void dlm_lockres_put(struct dlm_lock_resource *res);
-void __dlm_unhash_lockres(struct dlm_lock_resource *res);
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
-                         struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
                                                     const char *name,
                                                     unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen);
 
-#define dlm_lockres_set_refmap_bit(bit,res)  \
-       __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
-#define dlm_lockres_clear_refmap_bit(bit,res)  \
-       __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+                               struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+                                 struct dlm_lock_resource *res, int bit);
 
-static inline void __dlm_lockres_set_refmap_bit(int bit,
-                                               struct dlm_lock_resource *res,
-                                               const char *file,
-                                               int line)
-{
-       //printk("%s:%d:%.*s: setting bit %d\n", file, line,
-       //     res->lockname.len, res->lockname.name, bit);
-       set_bit(bit, res->refmap);
-}
-
-static inline void __dlm_lockres_clear_refmap_bit(int bit,
-                                                 struct dlm_lock_resource *res,
-                                                 const char *file,
-                                                 int line)
-{
-       //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
-       //     res->lockname.len, res->lockname.name, bit);
-       clear_bit(bit, res->refmap);
-}
-
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  const char *file,
-                                  int line);
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  int new_lockres,
-                                  const char *file,
-                                  int line);
-#define dlm_lockres_drop_inflight_ref(d,r)  \
-       __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref(d,r)  \
-       __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref_new(d,r)  \
-       __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res);
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
index 6ed6b95dcf935a6516e935b85a3ca9ffc0b8a9d8..92f2ead0fab6de22fa138cc4410dee6e1544216c 100644 (file)
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 
-void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-       if (!hlist_unhashed(&lockres->hash_node)) {
-               hlist_del_init(&lockres->hash_node);
-               dlm_lockres_put(lockres);
-       }
+       if (hlist_unhashed(&res->hash_node))
+               return;
+
+       mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+            res->lockname.name);
+       hlist_del_init(&res->hash_node);
+       dlm_lockres_put(res);
 }
 
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
-                      struct dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
        struct hlist_head *bucket;
        struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
        dlm_lockres_get(res);
 
        hlist_add_head(&res->hash_node, bucket);
+
+       mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+            res->lockname.name);
 }
 
 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
 
 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
 {
-       int node = -1;
+       int node = -1, num = 0;
 
        assert_spin_locked(&dlm->spinlock);
 
-       printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+       printk("( ");
        while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                printk("%d ", node);
+               ++num;
        }
-       printk("\n");
+       printk(") %u nodes\n", num);
 }
 
 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
 
        node = exit_msg->node_idx;
 
-       printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
        spin_lock(&dlm->spinlock);
        clear_bit(node, dlm->domain_map);
        clear_bit(node, dlm->exit_domain_map);
+       printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
        __dlm_print_nodes(dlm);
 
        /* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
+               printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
                dlm_force_free_mles(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
                clear_bit(assert->node_idx, dlm->exit_domain_map);
                __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
 
-               printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+               printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
                       assert->node_idx, dlm->name);
                __dlm_print_nodes(dlm);
 
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
 bail:
        spin_lock(&dlm->spinlock);
        __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
-       if (!status)
+       if (!status) {
+               printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
                __dlm_print_nodes(dlm);
+       }
        spin_unlock(&dlm->spinlock);
 
        if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
                goto leave;
        }
 
-       if (!o2hb_check_local_node_heartbeating()) {
-               mlog(ML_ERROR, "the local node has not been configured, or is "
-                    "not heartbeating\n");
-               ret = -EPROTO;
-               goto leave;
-       }
-
        mlog(0, "register called for domain \"%s\"\n", domain);
 
 retry:
index 8d39e0fd66f7379b8fb08ac40502efa0d4a51cc6..975810b98492a34f4576b9d3d3ac3e4421fb18c1 100644 (file)
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
                        kick_thread = 1;
                }
        }
-       /* reduce the inflight count, this may result in the lockres
-        * being purged below during calc_usage */
-       if (lock->ml.node == dlm->node_num)
-               dlm_lockres_drop_inflight_ref(dlm, res);
 
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
             lock->ml.type, res->lockname.len,
             res->lockname.name, flags);
 
+       /*
+        * Wait if resource is getting recovered, remastered, etc.
+        * If the resource was remastered and new owner is self, then exit.
+        */
        spin_lock(&res->spinlock);
-
-       /* will exit this call with spinlock held */
        __dlm_wait_on_lockres(res);
+       if (res->owner == dlm->node_num) {
+               spin_unlock(&res->spinlock);
+               return DLM_RECOVERING;
+       }
        res->state |= DLM_LOCK_RES_IN_PROGRESS;
 
        /* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
        tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
                                    sizeof(create), res->owner, &status);
        if (tmpret >= 0) {
-               // successfully sent and received
-               ret = status;  // this is already a dlm_status
+               ret = status;
                if (ret == DLM_REJECTED) {
-                       mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
-                            "no longer owned by %u.  that node is coming back "
-                            "up currently.\n", dlm->name, create.namelen,
+                       mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+                            "owned by node %u. That node is coming back up "
+                            "currently.\n", dlm->name, create.namelen,
                             create.name, res->owner);
                        dlm_print_one_lock_resource(res);
                        BUG();
                }
        } else {
-               mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                    "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
-                    res->owner);
-               if (dlm_is_host_down(tmpret)) {
+               mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+                    "node %u\n", dlm->name, create.namelen, create.name,
+                    tmpret, res->owner);
+               if (dlm_is_host_down(tmpret))
                        ret = DLM_RECOVERING;
-                       mlog(0, "node %u died so returning DLM_RECOVERING "
-                            "from lock message!\n", res->owner);
-               } else {
+               else
                        ret = dlm_err_to_dlm_status(tmpret);
-               }
        }
 
        return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                /* zero memory only if kernel-allocated */
                lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
                if (!lksb) {
-                       kfree(lock);
+                       kmem_cache_free(dlm_lock_cache, lock);
                        return NULL;
                }
                kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
 
                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
-                       mlog(0, "retrying lock with migration/"
-                            "recovery/in progress\n");
                        msleep(100);
-                       /* no waiting for dlm_reco_thread */
                        if (recovery) {
                                if (status != DLM_RECOVERING)
                                        goto retry_lock;
-
-                               mlog(0, "%s: got RECOVERING "
-                                    "for $RECOVERY lock, master "
-                                    "was %u\n", dlm->name,
-                                    res->owner);
                                /* wait to see the node go down, then
                                 * drop down and allow the lockres to
                                 * get cleaned up.  need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
                        }
                }
 
+               /* Inflight taken in dlm_get_lock_resource() is dropped here */
+               spin_lock(&res->spinlock);
+               dlm_lockres_drop_inflight_ref(dlm, res);
+               spin_unlock(&res->spinlock);
+
+               dlm_lockres_calc_usage(dlm, res);
+               dlm_kick_thread(dlm, res);
+
                if (status != DLM_NORMAL) {
                        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
                        if (status != DLM_NOTQUEUED)
index 11eefb8c12e98fb418f41c31a3b0a32201be3ca1..005261c333b090f5f53f376bd5bbed55b8e16ba7 100644 (file)
@@ -631,39 +631,54 @@ error:
        return NULL;
 }
 
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  int new_lockres,
-                                  const char *file,
-                                  int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+                               struct dlm_lock_resource *res, int bit)
 {
-       if (!new_lockres)
-               assert_spin_locked(&res->spinlock);
+       assert_spin_locked(&res->spinlock);
+
+       mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+            res->lockname.name, bit, __builtin_return_address(0));
+
+       set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+                                 struct dlm_lock_resource *res, int bit)
+{
+       assert_spin_locked(&res->spinlock);
+
+       mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+            res->lockname.name, bit, __builtin_return_address(0));
+
+       clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
 
-       if (!test_bit(dlm->node_num, res->refmap)) {
-               BUG_ON(res->inflight_locks != 0);
-               dlm_lockres_set_refmap_bit(dlm->node_num, res);
-       }
        res->inflight_locks++;
-       mlog(0, "%s:%.*s: inflight++: now %u\n",
-            dlm->name, res->lockname.len, res->lockname.name,
-            res->inflight_locks);
+
+       mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+            res->lockname.len, res->lockname.name, res->inflight_locks,
+            __builtin_return_address(0));
 }
 
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  const char *file,
-                                  int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
 {
        assert_spin_locked(&res->spinlock);
 
        BUG_ON(res->inflight_locks == 0);
+
        res->inflight_locks--;
-       mlog(0, "%s:%.*s: inflight--: now %u\n",
-            dlm->name, res->lockname.len, res->lockname.name,
-            res->inflight_locks);
-       if (res->inflight_locks == 0)
-               dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+       mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+            res->lockname.len, res->lockname.name, res->inflight_locks,
+            __builtin_return_address(0));
+
        wake_up(&res->wq);
 }
 
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;
-       int drop_inflight_if_nonlocal = 0;
 
        BUG_ON(!lockid);
 
@@ -709,36 +723,33 @@ lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
-               int dropping_ref = 0;
-
                spin_unlock(&dlm->spinlock);
-
                spin_lock(&tmpres->spinlock);
-               /* We wait for the other thread that is mastering the resource */
+               /* Wait on the thread that is mastering the resource */
                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        __dlm_wait_on_lockres(tmpres);
                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+                       spin_unlock(&tmpres->spinlock);
+                       dlm_lockres_put(tmpres);
+                       tmpres = NULL;
+                       goto lookup;
                }
 
-               if (tmpres->owner == dlm->node_num) {
-                       BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
-                       dlm_lockres_grab_inflight_ref(dlm, tmpres);
-               } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
-                       dropping_ref = 1;
-               spin_unlock(&tmpres->spinlock);
-
-               /* wait until done messaging the master, drop our ref to allow
-                * the lockres to be purged, start over. */
-               if (dropping_ref) {
-                       spin_lock(&tmpres->spinlock);
-                       __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+               /* Wait on the resource purge to complete before continuing */
+               if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+                       BUG_ON(tmpres->owner == dlm->node_num);
+                       __dlm_wait_on_lockres_flags(tmpres,
+                                                   DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }
 
-               mlog(0, "found in hash!\n");
+               /* Grab inflight ref to pin the resource */
+               dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+               spin_unlock(&tmpres->spinlock);
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
@@ -829,8 +840,8 @@ lookup:
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
-                       mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-                            "recover before lock mastery can begin\n",
+                       mlog(0, "%s: res %.*s, At least one node (%d) "
+                            "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
@@ -843,12 +854,11 @@ lookup:
 
        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
-       /* since this lockres is new it doesn't not require the spinlock */
-       dlm_lockres_grab_inflight_ref_new(dlm, res);
 
-       /* if this node does not become the master make sure to drop
-        * this inflight reference below */
-       drop_inflight_if_nonlocal = 1;
+       /* Grab inflight ref to pin the resource */
+       spin_lock(&res->spinlock);
+       dlm_lockres_grab_inflight_ref(dlm, res);
+       spin_unlock(&res->spinlock);
 
        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
                 * dlm spinlock would be detectable be a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
-                       mlog(ML_NOTICE, "%s: recovery map is not empty, but "
-                            "must master $RECOVERY lock now\n", dlm->name);
+                       mlog(0, "%s: Recovery map is not empty, but must "
+                            "master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
@@ -883,8 +893,8 @@ redo_request:
                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
-                       mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-                            "recover before lock mastery can begin\n",
+                       mlog(0, "%s: res %.*s, At least one node (%d) "
+                            "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
@@ -913,8 +923,8 @@ redo_request:
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
-                       mlog(0, "%s:%.*s: requests only up to %u but master "
-                            "is %u, keep going\n", dlm->name, namelen,
+                       mlog(0, "%s: res %.*s, Requests only up to %u but "
+                            "master is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }
@@ -924,13 +934,12 @@ wait:
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
-               mlog(0, "%s:%.*s: node map changed, redo the "
-                    "master request now, blocked=%d\n",
-                    dlm->name, res->lockname.len,
+               mlog(0, "%s: res %.*s, Node map changed, redo the master "
+                    "request now, blocked=%d\n", dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
-                       mlog(ML_ERROR, "%s:%.*s: spinning on "
-                            "dlm_wait_for_lock_mastery, blocked=%d\n",
+                       mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+                            "dlm_wait_for_lock_mastery, blocked = %d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
                goto redo_request;
        }
 
-       mlog(0, "lockres mastered by %u\n", res->owner);
+       mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+            res->lockname.name, res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);
 
@@ -952,8 +962,6 @@ wait:
 
 wake_waiters:
        spin_lock(&res->spinlock);
-       if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
-               dlm_lockres_drop_inflight_ref(dlm, res);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
                }
 
                if (res->owner == dlm->node_num) {
-                       mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-                            dlm->name, namelen, name, request->node_idx);
-                       dlm_lockres_set_refmap_bit(request->node_idx, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
                        spin_unlock(&res->spinlock);
                        response = DLM_MASTER_RESP_YES;
                        if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
                                 * go back and clean the mles on any
                                 * other nodes */
                                dispatch_assert = 1;
-                               dlm_lockres_set_refmap_bit(request->node_idx, res);
-                               mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-                                    dlm->name, namelen, name,
-                                    request->node_idx);
+                               dlm_lockres_set_refmap_bit(dlm, res,
+                                                          request->node_idx);
                        } else
                                response = DLM_MASTER_RESP_NO;
                } else {
@@ -1702,7 +1706,7 @@ again:
                             "lockres, set the bit in the refmap\n",
                             namelen, lockname, to);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(to, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, to);
                        spin_unlock(&res->spinlock);
                }
        }
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
        namelen = res->lockname.len;
        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
 
-       mlog(0, "%s:%.*s: sending deref to %d\n",
-            dlm->name, namelen, lockname, res->owner);
        memset(&deref, 0, sizeof(deref));
        deref.node_idx = dlm->node_num;
        deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
        ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
                                 &deref, sizeof(deref), res->owner, &r);
        if (ret < 0)
-               mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                    "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
-                    res->owner);
+               mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+                    dlm->name, namelen, lockname, ret, res->owner);
        else if (r < 0) {
                /* BAD.  other node says I did not have a ref. */
-               mlog(ML_ERROR,"while dropping ref on %s:%.*s "
-                   "(master=%u) got %d.\n", dlm->name, namelen,
-                   lockname, res->owner, r);
+               mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+                    dlm->name, namelen, lockname, res->owner, r);
                dlm_print_one_lock_resource(res);
                BUG();
        }
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
        else {
                BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
                if (test_bit(node, res->refmap)) {
-                       dlm_lockres_clear_refmap_bit(node, res);
+                       dlm_lockres_clear_refmap_bit(dlm, res, node);
                        cleared = 1;
                }
        }
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
        BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
        if (test_bit(node, res->refmap)) {
                __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
-               dlm_lockres_clear_refmap_bit(node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, node);
                cleared = 1;
        }
        spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                BUG_ON(!list_empty(&lock->bast_list));
                                BUG_ON(lock->ast_pending);
                                BUG_ON(lock->bast_pending);
-                               dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+                               dlm_lockres_clear_refmap_bit(dlm, res,
+                                                            lock->ml.node);
                                list_del_init(&lock->list);
                                dlm_lock_put(lock);
                                /* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                        mlog(0, "%s:%.*s: node %u had a ref to this "
                             "migrating lockres, clearing\n", dlm->name,
                             res->lockname.len, res->lockname.name, bit);
-                       dlm_lockres_clear_refmap_bit(bit, res);
+                       dlm_lockres_clear_refmap_bit(dlm, res, bit);
                }
                bit++;
        }
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
                                         &migrate, sizeof(migrate), nodenum,
                                         &status);
                if (ret < 0) {
-                       mlog(ML_ERROR, "Error %d when sending message %u (key "
-                            "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
-                            dlm->key, nodenum);
+                       mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+                            "MIGRATE_REQUEST to node %u\n", dlm->name,
+                            migrate.namelen, migrate.name, ret, nodenum);
                        if (!dlm_is_host_down(ret)) {
                                mlog(ML_ERROR, "unhandled error=%d!\n", ret);
                                BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
                             dlm->name, res->lockname.len, res->lockname.name,
                             nodenum);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(nodenum, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, nodenum);
                        spin_unlock(&res->spinlock);
                }
        }
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         * mastery reference here since old_master will briefly have
         * a reference after the migration completes */
        spin_lock(&res->spinlock);
-       dlm_lockres_set_refmap_bit(old_master, res);
+       dlm_lockres_set_refmap_bit(dlm, res, old_master);
        spin_unlock(&res->spinlock);
 
        mlog(0, "now time to do a migrate request to other nodes\n");
index 7efab6d28a21b4ee6a8376559d70f739a4e1da90..01ebfd0bdad72264b99345378f0c6febe246503d 100644 (file)
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
 }
 
 
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
 {
-       if (timeout) {
-               mlog(ML_NOTICE, "%s: waiting %dms for notification of "
-                    "death of node %u\n", dlm->name, timeout, node);
+       if (dlm_is_node_dead(dlm, node))
+               return;
+
+       printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+              "domain %s\n", node, dlm->name);
+
+       if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
-                          dlm_is_node_dead(dlm, node),
-                          msecs_to_jiffies(timeout));
-       } else {
-               mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
-                    "of death of node %u\n", dlm->name, node);
+                                  dlm_is_node_dead(dlm, node),
+                                  msecs_to_jiffies(timeout));
+       else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
-       }
-       /* for now, return 0 */
-       return 0;
 }
 
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
 {
-       if (timeout) {
-               mlog(0, "%s: waiting %dms for notification of "
-                    "recovery of node %u\n", dlm->name, timeout, node);
+       if (dlm_is_node_recovered(dlm, node))
+               return;
+
+       printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+              "domain %s\n", node, dlm->name);
+
+       if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
-                          dlm_is_node_recovered(dlm, node),
-                          msecs_to_jiffies(timeout));
-       } else {
-               mlog(0, "%s: waiting indefinitely for notification "
-                    "of recovery of node %u\n", dlm->name, node);
+                                  dlm_is_node_recovered(dlm, node),
+                                  msecs_to_jiffies(timeout));
+       else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
-       }
-       /* for now, return 0 */
-       return 0;
 }
 
 /* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
 {
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+       printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+              dlm->name, dlm->reco.dead_node);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
 }
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
+       printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
        wake_up(&dlm->reco.event);
 }
 
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+       printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+              "dead node %u in domain %s\n", dlm->reco.new_master,
+              (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+              dlm->reco.dead_node, dlm->name);
+}
+
 static int dlm_do_recovery(struct dlm_ctxt *dlm)
 {
        int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
                }
                mlog(0, "another node will master this recovery session.\n");
        }
-       mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
-            dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
-            dlm->node_num, dlm->reco.dead_node);
+
+       dlm_print_recovery_master(dlm);
 
        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
        return 0;
 
 master_here:
-       mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
-            "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
-            dlm->node_num, dlm->reco.dead_node, dlm->name);
+       dlm_print_recovery_master(dlm);
 
        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
-               mlog(ML_ERROR, "error %d remastering locks for node %u, "
-                    "retrying.\n", status, dlm->reco.dead_node);
+               mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+                    "retrying.\n", dlm->name, status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
 
-               mlog(0, "requesting lock info from node %u\n",
+               mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
                     ndata->node_num);
 
                if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
                spin_unlock(&dlm_reco_state_lock);
        }
 
-       mlog(0, "done requesting all lock info\n");
+       mlog(0, "%s: Done requesting all lock info\n", dlm->name);
 
        /* nodes should be sending reco data now
         * just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
 
        /* negative status is handled by caller */
        if (ret < 0)
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
-                    dlm->key, request_from);
-
+               mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+                    "to recover dead node %u\n", dlm->name, ret,
+                    request_from, dead_node);
        // return from here, then
        // sleep until all received or error
        return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
-                    dlm->key, send_to);
+               mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+                    "to recover dead node %u\n", dlm->name, ret, send_to,
+                    dead_node);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
-                    dlm->key, send_to);
+               mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+                    "node %u (%s)\n", dlm->name, mres->lockname_len,
+                    mres->lockname, ret, send_to,
+                    (orig_flags & DLM_MRES_MIGRATION ?
+                     "migration" : "recovery"));
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                             dlm->name, mres->lockname_len, mres->lockname,
                             from);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(from, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, from);
                        spin_unlock(&res->spinlock);
                        added++;
                        break;
@@ -1965,7 +1972,7 @@ skip_lvb:
                        mlog(0, "%s:%.*s: added lock for node %u, "
                             "setting refmap bit\n", dlm->name,
                             res->lockname.len, res->lockname.name, ml->node);
-                       dlm_lockres_set_refmap_bit(ml->node, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, ml->node);
                        added++;
                }
                spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                if (res->owner == dead_node) {
+                       mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+                            dlm->name, res->lockname.len, res->lockname.name,
+                            res->owner, new_master);
                        list_del_init(&res->recovering);
                        spin_lock(&res->spinlock);
                        /* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
-                       if (res->state & DLM_LOCK_RES_RECOVERING) {
-                               if (res->owner == dead_node) {
-                                       mlog(0, "(this=%u) res %.*s owner=%u "
-                                            "was not on recovering list, but "
-                                            "clearing state anyway\n",
-                                            dlm->node_num, res->lockname.len,
-                                            res->lockname.name, new_master);
-                               } else if (res->owner == dlm->node_num) {
-                                       mlog(0, "(this=%u) res %.*s owner=%u "
-                                            "was not on recovering list, "
-                                            "owner is THIS node, clearing\n",
-                                            dlm->node_num, res->lockname.len,
-                                            res->lockname.name, new_master);
-                               } else
-                                       continue;
+                       if (!(res->state & DLM_LOCK_RES_RECOVERING))
+                               continue;
 
-                               if (!list_empty(&res->recovering)) {
-                                       mlog(0, "%s:%.*s: lockres was "
-                                            "marked RECOVERING, owner=%u\n",
-                                            dlm->name, res->lockname.len,
-                                            res->lockname.name, res->owner);
-                                       list_del_init(&res->recovering);
-                                       dlm_lockres_put(res);
-                               }
-                               spin_lock(&res->spinlock);
-                               /* new_master has our reference from
-                                * the lock state sent during recovery */
-                               dlm_change_lockres_owner(dlm, res, new_master);
-                               res->state &= ~DLM_LOCK_RES_RECOVERING;
-                               if (__dlm_lockres_has_locks(res))
-                                       __dlm_dirty_lockres(dlm, res);
-                               spin_unlock(&res->spinlock);
-                               wake_up(&res->wq);
+                       if (res->owner != dead_node &&
+                           res->owner != dlm->node_num)
+                               continue;
+
+                       if (!list_empty(&res->recovering)) {
+                               list_del_init(&res->recovering);
+                               dlm_lockres_put(res);
                        }
+
+                       /* new_master has our reference from
+                        * the lock state sent during recovery */
+                       mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+                            dlm->name, res->lockname.len, res->lockname.name,
+                            res->owner, new_master);
+                       spin_lock(&res->spinlock);
+                       dlm_change_lockres_owner(dlm, res, new_master);
+                       res->state &= ~DLM_LOCK_RES_RECOVERING;
+                       if (__dlm_lockres_has_locks(res))
+                               __dlm_dirty_lockres(dlm, res);
+                       spin_unlock(&res->spinlock);
+                       wake_up(&res->wq);
                }
        }
 }
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                             res->lockname.len, res->lockname.name, freed, dead_node);
                        __dlm_print_one_lock_resource(res);
                }
-               dlm_lockres_clear_refmap_bit(dead_node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
        } else if (test_bit(dead_node, res->refmap)) {
                mlog(0, "%s:%.*s: dead node %u had a ref, but had "
                     "no locks and had not purged before dying\n", dlm->name,
                     res->lockname.len, res->lockname.name, dead_node);
-               dlm_lockres_clear_refmap_bit(dead_node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
        }
 
        /* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                        dlm_revalidate_lvb(dlm, res, dead_node);
                        if (res->owner == dead_node) {
                                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
-                                       mlog(ML_NOTICE, "Ignore %.*s for "
+                                       mlog(ML_NOTICE, "%s: res %.*s, Skip "
                                             "recovery as it is being freed\n",
-                                            res->lockname.len,
+                                            dlm->name, res->lockname.len,
                                             res->lockname.name);
                                } else
                                        dlm_move_lockres_to_recovery_list(dlm,
index 1d6d1d22c4715e3c89bef69570916cc5bf44c259..e73c833fc2a1a97cac35903f0439115cef813c69 100644 (file)
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
        int bit;
 
+       assert_spin_locked(&res->spinlock);
+
        if (__dlm_lockres_has_locks(res))
                return 0;
 
+       /* Locks are in the process of being created */
+       if (res->inflight_locks)
+               return 0;
+
        if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
                return 0;
 
        if (res->state & DLM_LOCK_RES_RECOVERING)
                return 0;
 
+       /* Another node has this resource with this node as the master */
        bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
        if (bit < O2NM_MAX_NODES)
                return 0;
 
-       /*
-        * since the bit for dlm->node_num is not set, inflight_locks better
-        * be zero
-        */
-       BUG_ON(res->inflight_locks != 0);
        return 1;
 }
 
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                /* clear our bit from the master's refmap, ignore errors */
                ret = dlm_drop_lockres_ref(dlm, res);
                if (ret < 0) {
-                       mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
-                            res->lockname.len, res->lockname.name, ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                }
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                BUG();
        }
 
-       __dlm_unhash_lockres(res);
+       __dlm_unhash_lockres(dlm, res);
 
        /* lockres is not in the hash now.  drop the flag and wake up
         * any processes waiting in dlm_get_lock_resource. */
index e1ed5e502ff25dc8afe39de464949ba13c2f9892..81a4cd22f80be84a06eac2b0fbf4348385d76262 100644 (file)
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
        mlog(0, "inode %llu take PRMODE open lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-       if (ocfs2_mount_local(osb))
+       if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
                goto out;
 
        lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             write ? "EXMODE" : "PRMODE");
 
+       if (ocfs2_is_hard_readonly(osb)) {
+               if (write)
+                       status = -EROFS;
+               goto out;
+       }
+
        if (ocfs2_mount_local(osb))
                goto out;
 
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
        if (ocfs2_is_hard_readonly(osb)) {
                if (ex)
                        status = -EROFS;
-               goto bail;
+               goto getbh;
        }
 
        if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
                        mlog_errno(status);
                goto bail;
        }
-
+getbh:
        if (ret_bh) {
                status = ocfs2_assign_bh(inode, ret_bh, local_bh);
                if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
 
        BUG_ON(!dl);
 
-       if (ocfs2_is_hard_readonly(osb))
-               return -EROFS;
+       if (ocfs2_is_hard_readonly(osb)) {
+               if (ex)
+                       return -EROFS;
+               return 0;
+       }
 
        if (ocfs2_mount_local(osb))
                return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
        struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
 
-       if (!ocfs2_mount_local(osb))
+       if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
 }
 
index 23457b491e8ce53ac5b71d9cd5fc2a5e1400a07f..2f5b92ef0e533146007b49d21dd705a242125dc5 100644 (file)
@@ -832,6 +832,102 @@ out:
        return ret;
 }
 
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       int ret;
+       unsigned int is_last = 0, is_data = 0;
+       u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+       u32 cpos, cend, clen, hole_size;
+       u64 extoff, extlen;
+       struct buffer_head *di_bh = NULL;
+       struct ocfs2_extent_rec rec;
+
+       BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+
+       ret = ocfs2_inode_lock(inode, &di_bh, 0);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       if (*offset >= inode->i_size) {
+               ret = -ENXIO;
+               goto out_unlock;
+       }
+
+       if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+               if (origin == SEEK_HOLE)
+                       *offset = inode->i_size;
+               goto out_unlock;
+       }
+
+       clen = 0;
+       cpos = *offset >> cs_bits;
+       cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+
+       while (cpos < cend && !is_last) {
+               ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
+                                                &rec, &is_last);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out_unlock;
+               }
+
+               extoff = cpos;
+               extoff <<= cs_bits;
+
+               if (rec.e_blkno == 0ULL) {
+                       clen = hole_size;
+                       is_data = 0;
+               } else {
+                       clen = le16_to_cpu(rec.e_leaf_clusters) -
+                               (cpos - le32_to_cpu(rec.e_cpos));
+                       is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ?  0 : 1;
+               }
+
+               if ((!is_data && origin == SEEK_HOLE) ||
+                   (is_data && origin == SEEK_DATA)) {
+                       if (extoff > *offset)
+                               *offset = extoff;
+                       goto out_unlock;
+               }
+
+               if (!is_last)
+                       cpos += clen;
+       }
+
+       if (origin == SEEK_HOLE) {
+               extoff = cpos;
+               extoff <<= cs_bits;
+               extlen = clen;
+               extlen <<=  cs_bits;
+
+               if ((extoff + extlen) > inode->i_size)
+                       extlen = inode->i_size - extoff;
+               extoff += extlen;
+               if (extoff > *offset)
+                       *offset = extoff;
+               goto out_unlock;
+       }
+
+       ret = -ENXIO;
+
+out_unlock:
+
+       brelse(di_bh);
+
+       up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       ocfs2_inode_unlock(inode, 0);
+out:
+       if (ret && ret != -ENXIO)
+               ret = -ENXIO;
+       return ret;
+}
+
 int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
                           struct buffer_head *bhs[], int flags,
                           int (*validate)(struct super_block *sb,
index e79d41c2c90972fe801a99ec040b229a92debe98..67ea57d2fd594da7e456c1103bb1652fa68b5f69 100644 (file)
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
 int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 u64 map_start, u64 map_len);
 
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
+
 int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
                             u32 *p_cluster, u32 *num_clusters,
                             struct ocfs2_extent_list *el,
index de4ea1af041b654f8f1b4f1a000f6492226f701f..6e396683c3d48af7321f77b8658754982586cb35 100644 (file)
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
+       if (file->f_flags & O_SYNC)
+               handle->h_sync = 1;
+
        ocfs2_commit_trans(osb, handle);
 
 out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
        return ret;
 }
 
+static void ocfs2_aiodio_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
+
+       wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
+}
+
+static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
+{
+       int blockmask = inode->i_sb->s_blocksize - 1;
+       loff_t final_size = pos + count;
+
+       if ((pos & blockmask) || (final_size & blockmask))
+               return 1;
+       return 0;
+}
+
 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
                                            struct file *file,
                                            loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
+       int unaligned_dio = 0;
 
        trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
                goto out;
        }
 
+       if (direct_io && !is_sync_kiocb(iocb))
+               unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+                                                     *ppos);
+
        /*
         * We can't complete the direct I/O as requested, fall back to
         * buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
                goto relock;
        }
 
+       if (unaligned_dio) {
+               /*
+                * Wait on previous unaligned aio to complete before
+                * proceeding.
+                */
+               ocfs2_aiodio_wait(inode);
+
+               /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
+               atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+               ocfs2_iocb_set_unaligned_aio(iocb);
+       }
+
        /*
         * To later detect whether a journal commit for sync writes is
         * necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
        if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
                rw_level = -1;
                have_alloc_sem = 0;
+               unaligned_dio = 0;
        }
 
+       if (unaligned_dio)
+               atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+
 out:
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
        return ret;
 }
 
+/* Refer generic_file_llseek_unlocked() */
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+
+       switch (origin) {
+       case SEEK_SET:
+               break;
+       case SEEK_END:
+               offset += inode->i_size;
+               break;
+       case SEEK_CUR:
+               if (offset == 0) {
+                       offset = file->f_pos;
+                       goto out;
+               }
+               offset += file->f_pos;
+               break;
+       case SEEK_DATA:
+       case SEEK_HOLE:
+               ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+               if (ret)
+                       goto out;
+               break;
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+               ret = -EINVAL;
+       if (!ret && offset > inode->i_sb->s_maxbytes)
+               ret = -EINVAL;
+       if (ret)
+               goto out;
+
+       if (offset != file->f_pos) {
+               file->f_pos = offset;
+               file->f_version = 0;
+       }
+
+out:
+       mutex_unlock(&inode->i_mutex);
+       if (ret)
+               return ret;
+       return offset;
+}
+
 const struct inode_operations ocfs2_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
  * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
  */
 const struct file_operations ocfs2_fops = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ocfs2_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .mmap           = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
  * the cluster.
  */
 const struct file_operations ocfs2_fops_no_plocks = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ocfs2_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .mmap           = ocfs2_mmap,
index a22d2c098890a9ca67e2056976bfc9869d74a325..17454a904d7bf488093de9f3db61dc529e0f8e3c 100644 (file)
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
        trace_ocfs2_cleanup_delete_inode(
                (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
        if (sync_data)
-               write_inode_now(inode, 1);
+               filemap_write_and_wait(inode->i_mapping);
        truncate_inode_pages(&inode->i_data, 0);
 }
 
index 1c508b149b3ac1bd4325fd33a9aae6bdb70e024a..88924a3133fae7c15ca3f5a5259b64eecd97022e 100644 (file)
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
        /* protects extended attribute changes on this inode */
        struct rw_semaphore             ip_xattr_sem;
 
+       /* Number of outstanding AIO's which are not page aligned */
+       atomic_t                        ip_unaligned_aio;
+
        /* These fields are protected by ip_lock */
        spinlock_t                      ip_lock;
        u32                             ip_open_count;
index bc91072b72196fd335c4b7cbc02ba08cb67254e6..726ff265b296bc3365cfe46e94588c1ee4f6ed6a 100644 (file)
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
                (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
                if (!capable(CAP_LINUX_IMMUTABLE))
-                       goto bail_unlock;
+                       goto bail_commit;
        }
 
        ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if (status < 0)
                mlog_errno(status);
 
+bail_commit:
        ocfs2_commit_trans(osb, handle);
 bail_unlock:
        ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
        if (!oifi) {
                status = -ENOMEM;
                mlog_errno(status);
-               goto bail;
+               goto out_err;
        }
 
        if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
                o2info_set_request_error(&oifi->ifi_req, req);
 
        kfree(oifi);
-
+out_err:
        return status;
 }
 
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
        if (!oiff) {
                status = -ENOMEM;
                mlog_errno(status);
-               goto bail;
+               goto out_err;
        }
 
        if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
                o2info_set_request_error(&oiff->iff_req, req);
 
        kfree(oiff);
-
+out_err:
        return status;
 }
 
index 295d56454e8b23b6e9d97a6bd258e6170311c8f5..0a42ae96dca7d4a0f662505e0e51206895ce4156 100644 (file)
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
        /* we need to run complete recovery for offline orphan slots */
        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
 
-       mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
-            node_num, slot_num,
-            MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+       printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
+              "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+              MINOR(osb->sb->s_dev));
 
        OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
 
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
 
        jbd2_journal_destroy(journal);
 
+       printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
+              "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+              MINOR(osb->sb->s_dev));
 done:
        /* drop the lock on this nodes journal */
        if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
  * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
  * is done to catch any orphans that are left over in orphan directories.
  *
+ * It scans all slots, even ones that are in use. It does so to handle the
+ * case described below:
+ *
+ *   Node 1 has an inode it was using. The dentry went away due to memory
+ *   pressure.  Node 1 closes the inode, but it's on the free list. The node
+ *   has the open lock.
+ *   Node 2 unlinks the inode. It grabs the dentry lock to notify others,
+ *   but node 1 has no dentry and doesn't get the message. It trylocks the
+ *   open lock, sees that another node has a PR, and does nothing.
+ *   Later node 2 runs its orphan dir. It igets the inode, trylocks the
+ *   open lock, sees the PR still, and does nothing.
+ *   Basically, we have to trigger an orphan iput on node 1. The only way
+ *   for this to happen is if node 1 runs node 2's orphan dir.
+ *
  * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
  * seconds.  It gets an EX lock on os_lockres and checks sequence number
  * stored in LVB. If the sequence number has changed, it means some other
index 68cf2f6d3c6a40b22dda3e2d4f7e2d44349167fa..a3385b63ff5e542bcfaabe59744fa3af38b537e4 100644 (file)
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
 #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
 
 /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
- * update on dir + index leaf + dx root update for free list */
+ * update on dir + index leaf + dx root update for free list +
+ * previous dirblock update in the free list */
 static inline int ocfs2_link_credits(struct super_block *sb)
 {
-       return 2*OCFS2_INODE_UPDATE_CREDITS + 3 +
+       return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
               ocfs2_quota_trans_credits(sb);
 }
 
index 3e9393ca39ebd823772ae910cda1b4516d46fa62..9cd41083e99123eca1c48085fb39809e6b906b40 100644 (file)
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
                                struct page *page)
 {
-       int ret;
+       int ret = VM_FAULT_NOPAGE;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
        void *fsdata;
        loff_t size = i_size_read(inode);
 
-       /*
-        * Another node might have truncated while we were waiting on
-        * cluster locks.
-        * We don't check size == 0 before the shift. This is borrowed
-        * from do_generic_file_read.
-        */
        last_index = (size - 1) >> PAGE_CACHE_SHIFT;
-       if (unlikely(!size || page->index > last_index)) {
-               ret = -EINVAL;
-               goto out;
-       }
 
        /*
-        * The i_size check above doesn't catch the case where nodes
-        * truncated and then re-extended the file. We'll re-check the
-        * page mapping after taking the page lock inside of
-        * ocfs2_write_begin_nolock().
+        * There are cases that lead to the page no longer belonging
+        * to the mapping.
+        * 1) pagecache truncates locally due to memory pressure.
+        * 2) pagecache truncates when another is taking EX lock against
+        * inode lock. See ocfs2_data_convert_worker.
+        *
+        * The i_size check doesn't catch the case where nodes truncated and
+        * then re-extended the file. We'll re-check the page mapping after
+        * taking the page lock inside of ocfs2_write_begin_nolock().
+        *
+        * Let VM retry with these cases.
         */
-       if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
-               /*
-                * the page has been umapped in ocfs2_data_downconvert_worker.
-                * So return 0 here and let VFS retry.
-                */
-               ret = 0;
+       if ((page->mapping != inode->i_mapping) ||
+           (!PageUptodate(page)) ||
+           (page_offset(page) >= size))
                goto out;
-       }
 
        /*
         * Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
        if (ret) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else
+                       ret = VM_FAULT_SIGBUS;
                goto out;
        }
 
-       ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
-                                    fsdata);
-       if (ret < 0) {
-               mlog_errno(ret);
+       if (!locked_page) {
+               ret = VM_FAULT_NOPAGE;
                goto out;
        }
+       ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
+                                    fsdata);
        BUG_ON(ret != len);
-       ret = 0;
+       ret = VM_FAULT_LOCKED;
 out:
        return ret;
 }
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 out:
        ocfs2_unblock_signals(&oldset);
-       if (ret)
-               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
index d53cb706f14c27dfc5ec754dc6d0f846e424421c..184c76b8c293907368f325316da04f57801d87eb 100644 (file)
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
         */
        ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
                                new_phys_cpos);
-       if (!new_phys_cpos) {
+       if (!*new_phys_cpos) {
                ret = -ENOSPC;
                goto out_commit;
        }
index 409285854f647e2357223bc7a8d24c36b376a6bb..d355e6e36b366bfe7dc8cc91cbabdad05976ba2c 100644 (file)
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
 
 static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
 {
-       __test_and_set_bit_le(bit, bitmap);
+       __set_bit_le(bit, bitmap);
 }
 #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
 
 static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
 {
-       __test_and_clear_bit_le(bit, bitmap);
+       __clear_bit_le(bit, bitmap);
 }
 #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
 
 #define ocfs2_test_bit test_bit_le
 #define ocfs2_find_next_zero_bit find_next_zero_bit_le
 #define ocfs2_find_next_bit find_next_bit_le
+
+static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
+{
+#if BITS_PER_LONG == 64
+       *bit += ((unsigned long) addr & 7UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~7UL);
+#elif BITS_PER_LONG == 32
+       *bit += ((unsigned long) addr & 3UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~3UL);
+#else
+#error "how many bits you are?!"
+#endif
+       return addr;
+}
+
+static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       ocfs2_set_bit(bit, bitmap);
+}
+
+static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       ocfs2_clear_bit(bit, bitmap);
+}
+
+static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       return ocfs2_test_bit(bit, bitmap);
+}
+
+static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
+                                                       int start)
+{
+       int fix = 0, ret, tmpmax;
+       bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
+       tmpmax = max + fix;
+       start += fix;
+
+       ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
+       if (ret > max)
+               return max;
+       return ret;
+}
+
 #endif  /* OCFS2_H */
 
index dc8007fc924718c6d461b22cee52e4a8fbd6ae7d..f100bf70a9066ed1b917b8ec0451c27231840f92 100644 (file)
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
        int status = 0;
        struct ocfs2_quota_recovery *rec;
 
-       mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
+       printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
+              "slot %u\n", osb->dev_str, slot_num);
+
        rec = ocfs2_alloc_quota_recovery();
        if (!rec)
                return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
                                goto out_commit;
                        }
                        lock_buffer(qbh);
-                       WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
-                       ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
+                       WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
+                       ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
                        le32_add_cpu(&dchunk->dqc_free, 1);
                        unlock_buffer(qbh);
                        ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
        struct inode *lqinode;
        unsigned int flags;
 
-       mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
+       printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+              "slot %u\n", osb->dev_str, slot_num);
+
        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        for (type = 0; type < MAXQUOTAS; type++) {
                if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
                /* Someone else is holding the lock? Then he must be
                 * doing the recovery. Just skip the file... */
                if (status == -EAGAIN) {
-                       mlog(ML_NOTICE, "skipping quota recovery for slot %d "
-                            "because quota file is locked.\n", slot_num);
+                       printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
+                              "device (%s) for slot %d because quota file is "
+                              "locked.\n", osb->dev_str, slot_num);
                        status = 0;
                        goto out_put;
                } else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
                      * ol_quota_entries_per_block(sb);
        }
 
-       found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0);
+       found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
        /* We failed? */
        if (found == len) {
                mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
        struct ocfs2_local_disk_chunk *dchunk;
 
        dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
-       ocfs2_set_bit(*offset, dchunk->dqc_bitmap);
+       ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
        le32_add_cpu(&dchunk->dqc_free, -1);
 }
 
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
                        (od->dq_chunk->qc_headerbh->b_data);
        /* Mark structure as freed */
        lock_buffer(od->dq_chunk->qc_headerbh);
-       ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
+       ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
        le32_add_cpu(&dchunk->dqc_free, 1);
        unlock_buffer(od->dq_chunk->qc_headerbh);
        ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
index 26fc0014d50936137d0afb898b7c9541e8026087..1424c151cccce0170819ce4e0f36dad7d97461b8 100644 (file)
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
                        goto bail;
                }
        } else
-               mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
-                    slot);
+               printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+                      "allocated to this node!\n", slot, osb->dev_str);
 
        ocfs2_set_slot(si, slot, osb->node_num);
        osb->slot_num = slot;
index 19965b00c43caee7df4e09428775a55150ba9f8c..94368017edb378ce1e3961d1976408c954677c9a 100644 (file)
@@ -28,6 +28,7 @@
 #include "cluster/masklog.h"
 #include "cluster/nodemanager.h"
 #include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
 
 #include "stackglue.h"
 
@@ -255,6 +256,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
        dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
 }
 
+/*
+ * Check if this node is heartbeating and is connected to all other
+ * heartbeating nodes.
+ */
+static int o2cb_cluster_check(void)
+{
+       u8 node_num;
+       int i;
+       unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+       node_num = o2nm_this_node();
+       if (node_num == O2NM_MAX_NODES) {
+               printk(KERN_ERR "o2cb: This node has not been configured.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * o2dlm expects o2net sockets to be created. If not, then
+        * dlm_join_domain() fails with a stack of errors which are both cryptic
+        * and incomplete. The idea here is to detect upfront whether we have
+        * managed to connect to all nodes or not. If not, then list the nodes
+        * to allow the user to check the configuration (incorrect IP, firewall,
+        * etc.) Yes, this is racy. But its not the end of the world.
+        */
+#define        O2CB_MAP_STABILIZE_COUNT        60
+       for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
+               o2hb_fill_node_map(hbmap, sizeof(hbmap));
+               if (!test_bit(node_num, hbmap)) {
+                       printk(KERN_ERR "o2cb: %s heartbeat has not been "
+                              "started.\n", (o2hb_global_heartbeat_active() ?
+                                             "Global" : "Local"));
+                       return -EINVAL;
+               }
+               o2net_fill_node_map(netmap, sizeof(netmap));
+               /* Force set the current node to allow easy compare */
+               set_bit(node_num, netmap);
+               if (!memcmp(hbmap, netmap, sizeof(hbmap)))
+                       return 0;
+               if (i < O2CB_MAP_STABILIZE_COUNT)
+                       msleep(1000);
+       }
+
+       printk(KERN_ERR "o2cb: This node could not connect to nodes:");
+       i = -1;
+       while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
+                                 i + 1)) < O2NM_MAX_NODES) {
+               if (!test_bit(i, netmap))
+                       printk(" %u", i);
+       }
+       printk(".\n");
+
+       return -ENOTCONN;
+}
+
 /*
  * Called from the dlm when it's about to evict a node. This is how the
  * classic stack signals node death.
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
 {
        struct ocfs2_cluster_connection *conn = data;
 
-       mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
-            node_num, conn->cc_namelen, conn->cc_name);
+       printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
+              node_num, conn->cc_namelen, conn->cc_name);
 
        conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
 }
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        BUG_ON(conn == NULL);
        BUG_ON(conn->cc_proto == NULL);
 
-       /* for now we only have one cluster/node, make sure we see it
-        * in the heartbeat universe */
-       if (!o2hb_check_local_node_heartbeating()) {
-               if (o2hb_global_heartbeat_active())
-                       mlog(ML_ERROR, "Global heartbeat not started\n");
-               rc = -EINVAL;
+       /* Ensure cluster stack is up and all nodes are connected */
+       rc = o2cb_cluster_check();
+       if (rc) {
+               printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
+                      "before retrying.\n");
                goto out;
        }
 
index 56f61027236b696fce1ccde3e1edaf86acee59a0..4994f8b0e60410ff576fa63299e29e430192080a 100644 (file)
@@ -54,6 +54,7 @@
 #include "ocfs1_fs_compat.h"
 
 #include "alloc.h"
+#include "aops.h"
 #include "blockcheck.h"
 #include "dlmglue.h"
 #include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
                ocfs2_set_ro_flag(osb, 1);
 
-               printk(KERN_NOTICE "Readonly device detected. No cluster "
-                      "services will be utilized for this mount. Recovery "
-                      "will be skipped.\n");
+               printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
+                      "Cluster services will not be used for this mount. "
+                      "Recovery will be skipped.\n", osb->dev_str);
        }
 
        if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
        return 0;
 }
 
+wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
 static int __init ocfs2_init(void)
 {
-       int status;
+       int status, i;
 
        ocfs2_print_version();
 
+       for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
+               init_waitqueue_head(&ocfs2__ioend_wq[i]);
+
        status = init_ocfs2_uptodate_cache();
        if (status < 0) {
                mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
        ocfs2_extent_map_init(&oi->vfs_inode);
        INIT_LIST_HEAD(&oi->ip_io_markers);
        oi->ip_dir_start_lookup = 0;
-
+       atomic_set(&oi->ip_unaligned_aio, 0);
        init_rwsem(&oi->ip_alloc_sem);
        init_rwsem(&oi->ip_xattr_sem);
        mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
         * If we failed before we got a uuid_str yet, we can't stop
         * heartbeat.  Otherwise, do it.
         */
-       if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+       if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
+           !ocfs2_is_hard_readonly(osb))
                hangup_needed = 1;
 
        if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
                mlog_errno(status);
                goto bail;
        }
-       cleancache_init_shared_fs((char *)&uuid_net_key, sb);
+       cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
 
 bail:
        return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
                        goto finally;
                }
        } else {
-               mlog(ML_NOTICE, "File system was not unmounted cleanly, "
-                    "recovering volume.\n");
+               printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
+                      "unmounted cleanly, recovering it.\n", osb->dev_str);
        }
 
        local = ocfs2_mount_local(osb);
index 194fb22ef79d590580f3245b522d0b095ef3794c..aa9e8777b09a5e345b081b0e495db378485a30e7 100644 (file)
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
                }
 
                ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
-               if (ret < 0) {
-                       mlog_errno(ret);
-                       break;
-               }
 
                ocfs2_commit_trans(osb, ctxt.handle);
                if (ctxt.meta_ac) {
                        ocfs2_free_alloc_context(ctxt.meta_ac);
                        ctxt.meta_ac = NULL;
                }
+
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       break;
+               }
+
        }
 
        if (ctxt.meta_ac)
index 586174168e2ac8818fc51397223fe683b4afa189..80e4645f7990cf4cc0e636a7c19f9840503cdd42 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
index 9a8a2b77b87479621838ee00c928a43edbe938e9..03102d978180eba68469ef01d13ad25465f5b796 100644 (file)
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
 
 void __init proc_root_init(void)
 {
-       struct vfsmount *mnt;
        int err;
 
        proc_init_inodecache();
        err = register_filesystem(&proc_fs_type);
        if (err)
                return;
-       mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-       if (IS_ERR(mnt)) {
+       err = pid_ns_prepare_proc(&init_pid_ns);
+       if (err) {
                unregister_filesystem(&proc_fs_type);
                return;
        }
 
-       init_pid_ns.proc_mnt = mnt;
        proc_symlink("mounts", NULL, "self/mounts");
 
        proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
 
 void pid_ns_release_proc(struct pid_namespace *ns)
 {
-       mntput(ns->proc_mnt);
+       kern_unmount(ns->proc_mnt);
 }
index 42b274da92c39d539c7c2f0f82a2f825c1a20383..0855e6f20391715c945c32487bb2346f11f0e8a8 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = usecs_to_cputime(idle_time);
+               idle = usecs_to_cputime64(idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = usecs_to_cputime(iowait_time);
+               iowait = usecs_to_cputime64(iowait_time);
 
        return iowait;
 }
index 2bd620f0d796cf5dee01590c65b7ac8802e33bf9..57bbf9078ac8f327be28e88b38e10eeff1f9bfc0 100644 (file)
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
        }
 
        psinfo = psi;
+       mutex_init(&psinfo->read_mutex);
        spin_unlock(&pstore_lock);
 
        if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
 void pstore_get_records(int quiet)
 {
        struct pstore_info *psi = psinfo;
+       char                    *buf = NULL;
        ssize_t                 size;
        u64                     id;
        enum pstore_type_id     type;
        struct timespec         time;
        int                     failed = 0, rc;
-       unsigned long           flags;
 
        if (!psi)
                return;
 
-       spin_lock_irqsave(&psinfo->buf_lock, flags);
+       mutex_lock(&psi->read_mutex);
        rc = psi->open(psi);
        if (rc)
                goto out;
 
-       while ((size = psi->read(&id, &type, &time, psi)) > 0) {
-               rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
+       while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
+               rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
                                  time, psi);
+               kfree(buf);
+               buf = NULL;
                if (rc && (rc != -EEXIST || !quiet))
                        failed++;
        }
        psi->close(psi);
 out:
-       spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+       mutex_unlock(&psi->read_mutex);
 
        if (failed)
                printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
index 05d6b0e78c959a341137c97fbb2ea2fa89b25197..dba43c3ea3afb6605972d3a0e3eca3ac5248876e 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
index 20403dc5d4378da7a6e962601a8e60740cb6d5db..ae0e76bb6ebf44d5219693e27c08f9e9c8715da2 100644 (file)
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
                return -EINVAL;
        }
 
-       err = register_filesystem(&ubifs_fs_type);
-       if (err) {
-               ubifs_err("cannot register file system, error %d", err);
-               return err;
-       }
-
-       err = -ENOMEM;
        ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
                                sizeof(struct ubifs_inode), 0,
                                SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
                                &inode_slab_ctor);
        if (!ubifs_inode_slab)
-               goto out_reg;
+               return -ENOMEM;
 
        register_shrinker(&ubifs_shrinker_info);
 
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
        if (err)
                goto out_compr;
 
+       err = register_filesystem(&ubifs_fs_type);
+       if (err) {
+               ubifs_err("cannot register file system, error %d", err);
+               goto out_dbg;
+       }
        return 0;
 
+out_dbg:
+       dbg_debugfs_exit();
 out_compr:
        ubifs_compressors_exit();
 out_shrinker:
        unregister_shrinker(&ubifs_shrinker_info);
        kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
-       unregister_filesystem(&ubifs_fs_type);
        return err;
 }
 /* late_initcall to let compressors initialize first */
index b6c4b3795c4a000ce27ac3799b5c39ae6c8044e9..76e4266d2e7e4a8fc3bfc609016e4de467b7e5cf 100644 (file)
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
+       if (count > XFS_ACL_MAX_ENTRIES)
+               return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
        if (!acl)
index d4906e7c97873b302201cddf442bdc5eeb54a29d..c1b55e5965517a9407f678610b62f29fdabf33b3 100644 (file)
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        int offset;
        int minforkoff; /* lower limit on valid forkoff locations */
        int maxforkoff; /* upper limit on valid forkoff locations */
-       int dsize;      
+       int dsize;
        xfs_mount_t *mp = dp->i_mount;
 
        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-               if (bytes <= XFS_IFORK_ASIZE(dp))
-                       return dp->i_d.di_forkoff;
+       /*
+        * If the requested numbers of bytes is smaller or equal to the
+        * current attribute fork size we can always proceed.
+        *
+        * Note that if_bytes in the data fork might actually be larger than
+        * the current data fork size is due to delalloc extents. In that
+        * case either the extent count will go down when they are converted
+        * to real extents, or the delalloc conversion will take care of the
+        * literal area rebalancing.
+        */
+       if (bytes <= XFS_IFORK_ASIZE(dp))
+               return dp->i_d.di_forkoff;
+
+       /*
+        * For attr2 we can try to move the forkoff if there is space in the
+        * literal area, but for the old format we are done if there is no
+        * space in the fixed attribute fork.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2))
                return 0;
-       }
 
        dsize = dp->i_df.if_bytes;
-       
+
        switch (dp->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
-               /* 
+               /*
                 * If there is no attr fork and the data fork is extents, 
-                * determine if creating the default attr fork will result 
-                * in the extents form migrating to btree. If so, the 
-                * minimum offset only needs to be the space required for 
+                * determine if creating the default attr fork will result
+                * in the extents form migrating to btree. If so, the
+                * minimum offset only needs to be the space required for
                 * the btree root.
-                */ 
+                */
                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
                    xfs_default_attroffset(dp))
                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
                break;
-               
        case XFS_DINODE_FMT_BTREE:
                /*
-                * If have data btree then keep forkoff if we have one,
-                * otherwise we are adding a new attr, so then we set 
-                * minforkoff to where the btree root can finish so we have 
+                * If we have a data btree then keep forkoff if we have one,
+                * otherwise we are adding a new attr, so then we set
+                * minforkoff to where the btree root can finish so we have
                 * plenty of room for attrs
                 */
                if (dp->i_d.di_forkoff) {
-                       if (offset < dp->i_d.di_forkoff) 
+                       if (offset < dp->i_d.di_forkoff)
                                return 0;
-                       else 
-                               return dp->i_d.di_forkoff;
-               } else
-                       dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+                       return dp->i_d.di_forkoff;
+               }
+               dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
                break;
        }
-       
-       /* 
-        * A data fork btree root must have space for at least 
+
+       /*
+        * A data fork btree root must have space for at least
         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
         */
        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
        maxforkoff = maxforkoff >> 3;   /* rounded down */
 
-       if (offset >= minforkoff && offset < maxforkoff)
-               return offset;
        if (offset >= maxforkoff)
                return maxforkoff;
+       if (offset >= minforkoff)
+               return offset;
        return 0;
 }
 
index c68baeb0974adb2e57f690496fa8957c94c92e0d..d0ab78837057815f17605150d31a633c2eeb2739 100644 (file)
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
        int             tryagain;
        int             error;
 
+       ASSERT(ap->length);
+
        mp = ap->ip->i_mount;
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
        int                     error;
        int                     rt;
 
+       ASSERT(bma->length > 0);
+
        rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
 
        /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
        ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
        ASSERT(!(flags & XFS_BMAPI_IGSTATE));
        ASSERT(tp != NULL);
+       ASSERT(len > 0);
 
        whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
                XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
-                       bma.length = len;
                        bma.offset = bno;
 
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
                        error = xfs_bmapi_allocate(&bma, flags);
                        if (error)
                                goto error0;
index da108977b21f8582c7af92e2d4edcdbca8ba07b7..558910f5e3c06a3451558a486a7b982b0f8ee19d 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index c0237c602f11deb92fa5f533f74a647005fbf1b8..755ee8164880fe4122bac9de94119f1c7086a9b7 100644 (file)
@@ -2835,6 +2835,27 @@ corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_buf          *bp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+                       ip->i_imap.im_len, XBF_TRYLOCK);
+       if (!bp)
+               return;
+
+       if (XFS_BUF_ISDELAYWRITE(bp)) {
+               xfs_buf_delwri_promote(bp);
+               wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+       }
+
+       xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
index 760140d1dd661f42e653576a7c947b11eb8b6fbd..b4cd4739f98e74b2e256295b48fe64e285b320b0 100644 (file)
@@ -498,6 +498,7 @@ int         xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 void           xfs_iunpin_wait(xfs_inode_t *);
 int            xfs_iflush(xfs_inode_t *, uint);
+void           xfs_promote_inode(struct xfs_inode *);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
index a14cd89fe4655e2647d92d2191c1ffb8e6588787..34817adf4b9ed837da47d6f9ccfa977829fdd33c 100644 (file)
@@ -150,6 +150,117 @@ xlog_grant_add_space(
        } while (head_val != old);
 }
 
+STATIC bool
+xlog_reserveq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+               if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+                       need_bytes = tic->t_unit_res * tic->t_cnt;
+               else
+                       need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_grant_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_writeq, t_queue) {
+               ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+               need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_regrant_write_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_grant_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+               trace_xfs_log_grant_wake(log, tic);
+
+               spin_lock(&log->l_grant_reserve_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_writeq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_regrant_write_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+               trace_xfs_log_regrant_write_wake(log, tic);
+
+               spin_lock(&log->l_grant_write_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
@@ -350,8 +461,19 @@ xfs_log_reserve(
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
+       if (unlikely(retval)) {
+               /*
+                * If we are failing, make sure the ticket doesn't have any
+                * current reservations.  We don't want to add this back
+                * when the ticket/ transaction gets cancelled.
+                */
+               internal_ticket->t_curr_res = 0;
+               /* ungrant will give back unit_res * t_cnt. */
+               internal_ticket->t_cnt = 0;
+       }
+
        return retval;
-}      /* xfs_log_reserve */
+}
 
 
 /*
@@ -2481,8 +2603,8 @@ restart:
 /*
  * Atomically get the log space required for a log ticket.
  *
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
  *
  * This function is structured so that it has a lock free fast path. This is
  * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
  * every pass.
  *
  * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
  */
 STATIC int
-xlog_grant_log_space(xlog_t       *log,
-                    xlog_ticket_t *tic)
+xlog_grant_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int              free_bytes;
-       int              need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("grant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_grant_enter(log, tic);
 
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
+        */
        need_bytes = tic->t_unit_res;
        if (tic->t_flags & XFS_LOG_PERM_RESERV)
                need_bytes *= tic->t_ocnt;
-
-       /* something is already sleeping; insert new transaction at end */
-       if (!list_empty_careful(&log->l_reserveq)) {
-               spin_lock(&log->l_grant_reserve_lock);
-               /* recheck the queue now we are locked */
-               if (list_empty(&log->l_reserveq)) {
-                       spin_unlock(&log->l_grant_reserve_lock);
-                       goto redo;
-               }
-               list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep1(log, tic);
-
-               /*
-                * Gotta check this before going to sleep, while we're
-                * holding the grant lock.
-                */
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               /*
-                * If we got an error, and the filesystem is shutting down,
-                * we'll catch it down below. So just continue...
-                */
-               trace_xfs_log_grant_wake1(log, tic);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_reserveq)) {
                spin_lock(&log->l_grant_reserve_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep2(log, tic);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               trace_xfs_log_grant_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_reserveq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_reserveq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_reserve_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_reserve_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_reserveq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_reserve_lock);
        }
+       if (error)
+               return error;
 
-       /* we've got enough space */
        xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_grant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-error_return_unlocked:
-       spin_lock(&log->l_grant_reserve_lock);
-error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_reserve_lock);
-       trace_xfs_log_grant_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_grant_log_space */
-
+}
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
  * free fast path.
  */
 STATIC int
-xlog_regrant_write_log_space(xlog_t       *log,
-                            xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int             free_bytes, need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t       *log,
        if (tic->t_cnt > 0)
                return 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("regrant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_regrant_write_enter(log, tic);
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
 
-       /* If there are other waiters on the queue then give them a
-        * chance at logspace before us. Wake up the first waiters,
-        * if we do not wake up all the waiters then go to sleep waiting
-        * for more free space, otherwise try to get some space for
-        * this transaction.
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
         */
        need_bytes = tic->t_unit_res;
-       if (!list_empty_careful(&log->l_writeq)) {
-               struct xlog_ticket *ntic;
-
-               spin_lock(&log->l_grant_write_lock);
-               free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-               list_for_each_entry(ntic, &log->l_writeq, t_queue) {
-                       ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
-                       if (free_bytes < ntic->t_unit_res)
-                               break;
-                       free_bytes -= ntic->t_unit_res;
-                       wake_up(&ntic->t_wait);
-               }
-
-               if (ntic != list_first_entry(&log->l_writeq,
-                                               struct xlog_ticket, t_queue)) {
-                       if (list_empty(&tic->t_queue))
-                               list_add_tail(&tic->t_queue, &log->l_writeq);
-                       trace_xfs_log_regrant_write_sleep1(log, tic);
-
-                       xlog_grant_push_ail(log, need_bytes);
-
-                       XFS_STATS_INC(xs_sleep_logspace);
-                       xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-                       trace_xfs_log_regrant_write_wake1(log, tic);
-               } else
-                       spin_unlock(&log->l_grant_write_lock);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_writeq)) {
                spin_lock(&log->l_grant_write_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_writeq);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               trace_xfs_log_regrant_write_sleep2(log, tic);
-               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
-               trace_xfs_log_regrant_write_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_writeq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_writeq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_write_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_write_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_writeq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_write_lock);
        }
 
-       /* we've got enough space */
+       if (error)
+               return error;
+
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_regrant_write_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-
- error_return_unlocked:
-       spin_lock(&log->l_grant_write_lock);
- error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_write_lock);
-       trace_xfs_log_regrant_write_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_regrant_write_log_space */
-
+}
 
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
index 5cff443f6cdb782f4d89cfed6e767bdb24a32e06..0bbb1a41998bc95563813ef8a584e104aad5c462 100644 (file)
@@ -674,7 +674,8 @@ xfs_qm_dqattach_one(
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
-       error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
+       error = xfs_qm_dqget(ip->i_mount, ip, id, type,
+                            doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;
 
index 3eca58f51ae9040b0fcc5fe6b809df38dbf0b4ac..8a899496fd5fe55ef50f4cfdcfcce20e7875a60c 100644 (file)
@@ -868,27 +868,6 @@ xfs_fs_dirty_inode(
        XFS_I(inode)->i_update_core = 1;
 }
 
-STATIC int
-xfs_log_inode(
-       struct xfs_inode        *ip)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       return xfs_trans_commit(tp, 0);
-}
-
 STATIC int
 xfs_fs_write_inode(
        struct inode            *inode,
@@ -902,10 +881,8 @@ xfs_fs_write_inode(
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
-       if (!ip->i_update_core)
-               return 0;
 
-       if (wbc->sync_mode == WB_SYNC_ALL) {
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
                /*
                 * Make sure the inode has made it it into the log.  Instead
                 * of forcing it all the way to stable storage using a
@@ -913,11 +890,14 @@ xfs_fs_write_inode(
                 * ->sync_fs call do that for thus, which reduces the number
                 * of synchronous log forces dramatically.
                 */
-               error = xfs_log_inode(ip);
+               error = xfs_log_dirty_inode(ip, NULL, 0);
                if (error)
                        goto out;
                return 0;
        } else {
+               if (!ip->i_update_core)
+                       return 0;
+
                /*
                 * We make this non-blocking if the inode is contended, return
                 * EAGAIN to indicate to the caller that they did not succeed.
index aa3dc1a4d53d4f85f97a38f0db217bfcc4e3c953..f0994aedcd158c2db3d6f9b2bf4d21a4f819bec6 100644 (file)
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
        return error;
 }
 
+int
+xfs_log_dirty_inode(
+       struct xfs_inode        *ip,
+       struct xfs_perag        *pag,
+       int                     flags)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (!ip->i_update_core)
+               return 0;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       return xfs_trans_commit(tp, 0);
+}
+
 /*
  * When remounting a filesystem read-only or freezing the filesystem, we have
  * two phases to execute. This first phase is syncing the data before we
@@ -359,6 +385,16 @@ xfs_quiesce_data(
 {
        int                     error, error2 = 0;
 
+       /*
+        * Log all pending size and timestamp updates.  The vfs writeback
+        * code is supposed to do this, but due to its overagressive
+        * livelock detection it will skip inodes where appending writes
+        * were written out in the first non-blocking sync phase if their
+        * completion took long enough that it happened after taking the
+        * timestamp for the cut-off in the blocking phase.
+        */
+       xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
        xfs_qm_sync(mp, SYNC_TRYLOCK);
        xfs_qm_sync(mp, SYNC_WAIT);
 
@@ -770,6 +806,17 @@ restart:
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
+
+               /*
+                * If we only have a single dirty inode in a cluster there is
+                * a fair chance that the AIL push may have pushed it into
+                * the buffer, but xfsbufd won't touch it until 30 seconds
+                * from now, and thus we will lock up here.
+                *
+                * Promote the inode buffer to the front of the delwri list
+                * and wake up xfsbufd now.
+                */
+               xfs_promote_inode(ip);
                xfs_iflock(ip);
        }
 
index 941202e7ac6e594e2c423c19bc89248397e39516..fa965479d788d29da66b0e85bd59123c1fe08c65 100644 (file)
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
 void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
index f1d2802b2f0782130954248f11237c10cf09e92f..49403579887324b87c821bc7eb62b0114314ce20 100644 (file)
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
index 62ce6823c0f2ac82ccdf285cf99dc8d36a9bb80b..12a1764f612b2709360eb90f9c084b2dff3a8bb9 100644 (file)
@@ -40,6 +40,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         jiffies_to_usecs(__ct)
 #define usecs_to_cputime(__msecs)      usecs_to_jiffies(__msecs)
+#define usecs_to_cputime64(__msecs)    nsecs_to_jiffies64((__msecs) * 1000)
 
 /*
  * Convert cputime to seconds and back.
index f4c38d8c6674a3dd71ea08a47dc68e89e81e0d92..2292d1af9d705f129ae523ce00a6b7794fb1648c 100644 (file)
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_sendmmsg 269
 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
 
 #undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
 
 /*
  * All syscalls below here should go away really,
index d30bedfeb7efd22a288b2323b870d40593f56124..ddd46db65b57257164f13d703186bb7da5e7bb3a 100644 (file)
@@ -235,6 +235,8 @@ struct drm_mode_fb_cmd {
 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
 #define DRM_MODE_FB_DIRTY_FLAGS         0x03
 
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS     256
+
 /*
  * Mark a region of a framebuffer as dirty.
  *
index 3d53efd25ab906889e081acb8ae10f1065ef180a..14b6cd022284dfec490a2119b06de2ecb9386b76 100644 (file)
@@ -4,6 +4,7 @@
 */
 #define radeon_PCI_IDS \
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -55,6 +56,7 @@
        {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index 1d161cb3aca5c098b76dcc1fdd47bdd8de579814..12050434d57a248909d695a80445e958dea81e32 100644 (file)
 /**
  * User-desired buffer creation information structure.
  *
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
  *     - this size value would be page-aligned internally.
  * @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: returned a handle to created gem object.
+ *     - this handle will be set by gem module of kernel side.
  */
 struct drm_exynos_gem_create {
-       unsigned int size;
+       uint64_t size;
        unsigned int flags;
        unsigned int handle;
-       unsigned int pad;
 };
 
 /**
index b65be6054a183efe7b0ea5daa7c8afd7b1aa6a72..be94be6d6f17afdd027b3cf61679c73651ac6f9e 100644 (file)
@@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite {
 
 #define RADEON_CHUNK_ID_RELOCS 0x01
 #define RADEON_CHUNK_ID_IB     0x02
+#define RADEON_CHUNK_ID_FLAGS  0x03
+
+/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
+#define RADEON_CS_KEEP_TILING_FLAGS 0x01
 
 struct drm_radeon_cs_chunk {
        uint32_t                chunk_id;
index 619b5657af77b232541a56a6e3cd06d7f2693210..c94e71781b7983dd537f9b99e76ab106caf1d390 100644 (file)
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h
 header-y += if_pppox.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_team.h
 header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
@@ -194,7 +195,9 @@ header-y += igmp.h
 header-y += in.h
 header-y += in6.h
 header-y += in_route.h
+header-y += sock_diag.h
 header-y += inet_diag.h
+header-y += unix_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
index 49a83ca900ba7209926b89e8c98789703ee23c00..f4ff882cb2daa864d5e0e0c318783229ebf5d9f5 100644 (file)
@@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
-       return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
index a3c071c9e18934765e44b84d8330e9fd979d6dc4..847994aef0e9755a3a39d76dd42f87c9f43d23be 100644 (file)
@@ -211,8 +211,8 @@ extern void bio_pair_release(struct bio_pair *dbio);
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
-extern struct bio *bio_alloc(gfp_t, int);
-extern struct bio *bio_kmalloc(gfp_t, int);
+extern struct bio *bio_alloc(gfp_t, unsigned int);
+extern struct bio *bio_kmalloc(gfp_t, unsigned int);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 extern void bio_free(struct bio *, struct bio_set *);
@@ -519,7 +519,11 @@ extern void bio_integrity_init(void);
 #define bioset_integrity_create(a, b)  (0)
 #define bio_integrity_prep(a)          (0)
 #define bio_integrity_enabled(a)       (0)
-#define bio_integrity_clone(a, b, c, d)        (0)
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+                                     gfp_t gfp_mask, struct bio_set *bs)
+{
+       return 0;
+}
 #define bioset_integrity_free(a)       do { } while (0)
 #define bio_integrity_free(a, b)       do { } while (0)
 #define bio_integrity_endio(a, b)      do { } while (0)
index c7a6d3b5bc7bb010051c887e5e0c64fc7adca113..94acd8172b5bfcb0986014916533501f90ac047d 100644 (file)
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-                                                          request_fn_proc *,
-                                                          spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644 (file)
index 0000000..7702641
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN      0x01    /* Clock Out Enable */
+#define CPUIF_MUX      0x04    /* Multiplex */
+#define CPUIF_SLP      0x08    /* Sleep */
+#define CPUIF_PWD      0x10    /* Power Down Mode */
+#define CPUIF_DMC      0x20    /* Divide Memory Clock */
+#define CPUIF_DSC      0x40    /* Divide System Clock */
+#define CPUIF_RST      0x80    /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK  0x0f   /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30    /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT        4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0     0x01    /* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1     0x02    /* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1     0x08    /* Disconnect TX1 Output */
+#define BUSCFG_POL     0x20    /* Polarity dominant or recessive */
+#define BUSCFG_CBY     0x40    /* Input Comparator Bypass */
+
+struct cc770_platform_data {
+       u32 osc_freq;   /* CAN bus oscillator frequency in Hz */
+
+       u8 cir;         /* CPU Interface Register */
+       u8 cor;         /* Clock Out Register */
+       u8 bcr;         /* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H_ */
index f88eacb111d4151dc5491797fe0e03faaadc26a9..7c05ac202d90650069d4713ac2e13b3be9b86024 100644 (file)
 #include "osdmap.h"
 #include "messenger.h"
 
+/* 
+ * Maximum object name size 
+ * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100) 
+ */
+#define MAX_OBJ_NAME_SIZE 100
+
 struct ceph_msg;
 struct ceph_snap_context;
 struct ceph_osd_request;
@@ -75,7 +81,7 @@ struct ceph_osd_request {
        struct inode *r_inode;                /* for use by callbacks */
        void *r_priv;                         /* ditto */
 
-       char              r_oid[40];          /* object name */
+       char              r_oid[MAX_OBJ_NAME_SIZE];          /* object name */
        int               r_oid_len;
        unsigned long     r_stamp;            /* send OR check time */
 
index ac663c18776c95fdf77f7eb49ce2b4503d2cb795..0bd390ce98b2a9e9fd5fa69a697bb3d02f252194 100644 (file)
@@ -59,8 +59,16 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+/* */
+
 #ifdef CONFIG_CGROUP_PERF
 SUBSYS(perf)
 #endif
 
 /* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
index 139c4db55f1736eebc35cc4601ee98d0d10a40c7..081147da05642dbf98d53052e96f109301368910 100644 (file)
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:                Pointer to cycle counter.
+ * @cc:                Pointer to cycle counter.
  * @cycles:    Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:                Pointer to time counter.
- * @cycle:     a value returned by tc->cc->read()
+ * @cycle_tstamp:      a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,10 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:              cycle to nanosecond multiplier
  * @shift:             cycle to nanosecond divisor (power of two)
  * @max_idle_ns:       max idle time permitted by the clocksource (nsecs)
+ * @maxadj:            maximum adjustment value to mult (~11%)
  * @flags:             flags describing special properties
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
+ * @cycle_last:                most recent cycle counter value seen by ::read()
  */
 struct clocksource {
        /*
@@ -172,7 +174,7 @@ struct clocksource {
        u32 mult;
        u32 shift;
        u64 max_idle_ns;
-
+       u32 maxadj;
 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
        struct arch_clocksource_data archdata;
 #endif
@@ -186,6 +188,7 @@ struct clocksource {
        void (*suspend)(struct clocksource *cs);
        void (*resume)(struct clocksource *cs);
 
+       /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
@@ -260,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:    cycles
+ * @mult:      cycle to nanosecond multiplier
+ * @shift:     cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
index 154bf56830156876d56c1ced7d4c1e6c96973805..66ed067fb7291e89f1548718e581d1ed348c4120 100644 (file)
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 4df926199369622bffed05e3e60eeff8c42e4532..ed9f74f6c519a1f071348d691d69c7ed5795e938 100644 (file)
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
index ffbcf95cd97dbb3b7e12f9be778c90f1977a9a1a..3136ede5a1e1bb8434a0e2d52840f84de39647de 100644 (file)
@@ -69,7 +69,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @resume:    Called to bring a device on this bus out of sleep mode.
  * @pm:                Power management operations of this bus, callback the specific
  *             device driver's pm-ops.
- * @iommu_ops   IOMMU specific operations for this bus, used to attach IOMMU
+ * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
  *              driver implementations to a bus and allow the driver to do
  *              bus-specific setup
  * @p:         The private data of the driver core, only the driver core can
@@ -682,6 +682,11 @@ static inline bool device_async_suspend_enabled(struct device *dev)
        return !!dev->power.async_suspend;
 }
 
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+{
+       dev->power.ignore_children = enable;
+}
+
 static inline void device_lock(struct device *dev)
 {
        mutex_lock(&dev->mutex);
index ef90cbd8e1735c781083a2321760741107c2827b..57c9a8ae4f2df2127dffe7e88aee95b5f2cdce1a 100644 (file)
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define dmar_disabled  (1)
+#define intel_iommu_enabled (0)
 #endif
 
 
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644 (file)
index 0000000..5621547
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue).  Such a queue would have these general properties:
+ *
+ *   1) Objects are queued up to some limit specified as number of objects.
+ *   2) Periodically a completion process executes which retires consumed
+ *      objects.
+ *   3) Starvation occurs when limit has been reached, all queued data has
+ *      actually been consumed, but completion processing has not yet run
+ *      so queuing new data is blocked.
+ *   4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ *    dql_queued - called when objects are enqueued to record number of objects
+ *    dql_avail - returns how many objects are available to be queued based
+ *      on the object limit and how many objects are already enqueued
+ *    dql_completed - called at completion time to indicate how many objects
+ *      were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures, the higher layer should provide this.  dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed.  However, dql_queued and dql_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+       /* Fields accessed in enqueue path (dql_queued) */
+       unsigned int    num_queued;             /* Total ever queued */
+       unsigned int    adj_limit;              /* limit + num_completed */
+       unsigned int    last_obj_cnt;           /* Count at last queuing */
+
+       /* Fields accessed only by completion path (dql_completed) */
+
+       unsigned int    limit ____cacheline_aligned_in_smp; /* Current limit */
+       unsigned int    num_completed;          /* Total ever completed */
+
+       unsigned int    prev_ovlimit;           /* Previous over limit */
+       unsigned int    prev_num_queued;        /* Previous queue total */
+       unsigned int    prev_last_obj_cnt;      /* Previous queuing cnt */
+
+       unsigned int    lowest_slack;           /* Lowest slack found */
+       unsigned long   slack_start_time;       /* Time slacks seen */
+
+       /* Configuration */
+       unsigned int    max_limit;              /* Max limit */
+       unsigned int    min_limit;              /* Minimum limit */
+       unsigned int    slack_hold_time;        /* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+       BUG_ON(count > DQL_MAX_OBJECT);
+
+       dql->num_queued += count;
+       dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+       return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DQL_H */
index c4627cbdb8e038e4f61187db301c13a6acc12742..e50f98b0297a36b1d844ca5ca1d7c9f074e8677a 100644 (file)
@@ -33,6 +33,7 @@
 #define PCI_EEPROM_WIDTH_93C86 8
 #define PCI_EEPROM_WIDTH_OPCODE        3
 #define PCI_EEPROM_WRITE_OPCODE        0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
 #define PCI_EEPROM_READ_OPCODE 0x06
 #define PCI_EEPROM_EWDS_OPCODE 0x10
 #define PCI_EEPROM_EWEN_OPCODE 0x13
@@ -46,6 +47,7 @@
  * @register_write(struct eeprom_93cx6 *eeprom): handler to
  * write to the eeprom register by using all reg_* fields.
  * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
  * @reg_data_in: register field to indicate data input
  * @reg_data_out: register field to indicate data output
  * @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@ struct eeprom_93cx6 {
 
        int width;
 
+       char drive_data;
        char reg_data_in;
        char reg_data_out;
        char reg_data_clock;
@@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
        const u8 word, u16 *data);
 extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
        const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+                              u8 addr, u16 data);
index c9f522bd17e4df214e297eafa73cae7003f1b763..fd0628be45ce22ecc07ff1be9db1cdc18fdf2b80 100644 (file)
@@ -25,7 +25,7 @@ struct sock_extended_err {
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -34,7 +34,7 @@ struct sock_extended_err {
 struct sock_exterr_skb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index de33de1e205295150efb604dbcf6a56f9b4e91e5..da5b2de99ae450a1239cfb3cf6f24d7e71fc9b4c 100644 (file)
@@ -489,7 +489,10 @@ struct ethtool_rx_flow_spec {
  * on return.
  *
  * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
- * rules on return.
+ * rules on return.  If @data is non-zero on return then it is the
+ * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the
+ * driver supports any special location values.  If that flag is not
+ * set in @data then special location values should not be used.
  *
  * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
  * existing rule on entry and @fs contains the rule on return.
@@ -501,10 +504,23 @@ struct ethtool_rx_flow_spec {
  * must use the second parameter to get_rxnfc() instead of @rule_locs.
  *
  * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
- * @fs.@location specifies the location to use and must not be ignored.
+ * @fs.@location either specifies the location to use or is a special
+ * location value with %RX_CLS_LOC_SPECIAL flag set.  On return,
+ * @fs.@location is the actual rule location.
  *
  * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
  * existing rule on entry.
+ *
+ * A driver supporting the special location values for
+ * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
+ * location, and may remove a rule at a later location (lower
+ * priority) that matches exactly the same set of flows.  The special
+ * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
+ * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
+ * location (minimum priority).  Additional special values may be
+ * defined in future and drivers must return -%EINVAL for any
+ * unrecognised value.
  */
 struct ethtool_rxnfc {
        __u32                           cmd;
@@ -543,9 +559,15 @@ struct compat_ethtool_rxnfc {
 /**
  * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
  * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer.  On return from
- *     %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ *     On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ *     indirection table.
  * @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned.  For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values.  This last feature
+ * is not supported by the original implementations.
  */
 struct ethtool_rxfh_indir {
        __u32   cmd;
@@ -724,9 +746,6 @@ enum ethtool_sfeatures_retval_bits {
 
 #include <linux/rculist.h>
 
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
 extern int __ethtool_get_settings(struct net_device *dev,
                                  struct ethtool_cmd *cmd);
 
@@ -750,19 +769,18 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+       return index % n_rx_rings;
+}
 
 /**
  * struct ethtool_ops - optional netdev operations
@@ -807,22 +825,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  * @get_pauseparam: Report pause parameters
  * @set_pauseparam: Set pause parameters.  Returns a negative error code
  *     or zero.
- * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
- *     Report whether receive checksums are turned on or off.
- * @set_rx_csum: Deprecated in favour of generic netdev features.  Turn
- *     receive checksum on or off.  Returns a negative error code or zero.
- * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
- *     are turned on or off.
- * @set_tx_csum: Deprecated in favour of generic netdev features.  Turn
- *     transmit checksums on or off.  Returns a negative error code or zero.
- * @get_sg: Deprecated as redundant.  Report whether scatter-gather is
- *     enabled.  
- * @set_sg: Deprecated in favour of generic netdev features.  Turn
- *     scatter-gather on or off. Returns a negative error code or zero.
- * @get_tso: Deprecated as redundant.  Report whether TCP segmentation
- *     offload is enabled.
- * @set_tso: Deprecated in favour of generic netdev features.  Turn TCP
- *     segmentation offload on or off.  Returns a negative error code or zero.
  * @self_test: Run specified self-tests
  * @get_strings: Return a set of strings that describe the requested objects
  * @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +846,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  *     negative error code or zero.
  * @complete: Function to be called after any other operation except
  *     @begin.  Will be called even if the other operation failed.
- * @get_ufo: Deprecated as redundant.  Report whether UDP fragmentation
- *     offload is enabled.
- * @set_ufo: Deprecated in favour of generic netdev features.  Turn UDP
- *     fragmentation offload on or off.  Returns a negative error code or zero.
- * @get_flags: Deprecated as redundant.  Report features included in
- *     &enum ethtool_flags that are enabled.  
- * @set_flags: Deprecated in favour of generic netdev features.  Turn
- *     features included in &enum ethtool_flags on or off.  Returns a
- *     negative error code or zero.
  * @get_priv_flags: Report driver-specific feature flags.
  * @set_priv_flags: Set driver-specific feature flags.  Returns a negative
  *     error code or zero.
@@ -866,11 +859,13 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  * @reset: Reset (part of) the device, as specified by a bitmask of
  *     flags from &enum ethtool_reset_flags.  Returns a negative
  *     error code or zero.
- * @set_rx_ntuple: Set an RX n-tuple rule.  Returns a negative error code
- *     or zero.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ *     Returns zero if not supported for this specific device.
  * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
+ *     Will not be called if @get_rxfh_indir_size returns zero.
  *     Returns a negative error code or zero.
  * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
+ *     Will not be called if @get_rxfh_indir_size returns zero.
  *     Returns a negative error code or zero.
  * @get_channels: Get number of channels.
  * @set_channels: Set number of channels.  Returns a negative error code or
@@ -917,14 +912,6 @@ struct ethtool_ops {
                                  struct ethtool_pauseparam*);
        int     (*set_pauseparam)(struct net_device *,
                                  struct ethtool_pauseparam*);
-       u32     (*get_rx_csum)(struct net_device *);
-       int     (*set_rx_csum)(struct net_device *, u32);
-       u32     (*get_tx_csum)(struct net_device *);
-       int     (*set_tx_csum)(struct net_device *, u32);
-       u32     (*get_sg)(struct net_device *);
-       int     (*set_sg)(struct net_device *, u32);
-       u32     (*get_tso)(struct net_device *);
-       int     (*set_tso)(struct net_device *, u32);
        void    (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
        void    (*get_strings)(struct net_device *, u32 stringset, u8 *);
        int     (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +919,6 @@ struct ethtool_ops {
                                     struct ethtool_stats *, u64 *);
        int     (*begin)(struct net_device *);
        void    (*complete)(struct net_device *);
-       u32     (*get_ufo)(struct net_device *);
-       int     (*set_ufo)(struct net_device *, u32);
-       u32     (*get_flags)(struct net_device *);
-       int     (*set_flags)(struct net_device *, u32);
        u32     (*get_priv_flags)(struct net_device *);
        int     (*set_priv_flags)(struct net_device *, u32);
        int     (*get_sset_count)(struct net_device *, int);
@@ -944,12 +927,9 @@ struct ethtool_ops {
        int     (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
        int     (*flash_device)(struct net_device *, struct ethtool_flash *);
        int     (*reset)(struct net_device *, u32 *);
-       int     (*set_rx_ntuple)(struct net_device *,
-                                struct ethtool_rx_ntuple *);
-       int     (*get_rxfh_indir)(struct net_device *,
-                                 struct ethtool_rxfh_indir *);
-       int     (*set_rxfh_indir)(struct net_device *,
-                                 const struct ethtool_rxfh_indir *);
+       u32     (*get_rxfh_indir_size)(struct net_device *);
+       int     (*get_rxfh_indir)(struct net_device *, u32 *);
+       int     (*set_rxfh_indir)(struct net_device *, const u32 *);
        void    (*get_channels)(struct net_device *, struct ethtool_channels *);
        int     (*set_channels)(struct net_device *, struct ethtool_channels *);
        int     (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1173,6 +1153,12 @@ struct ethtool_ops {
 
 #define        RX_CLS_FLOW_DISC        0xffffffffffffffffULL
 
+/* Special RX classification rule insert location values */
+#define RX_CLS_LOC_SPECIAL     0x80000000      /* flag */
+#define RX_CLS_LOC_ANY         0xffffffff
+#define RX_CLS_LOC_FIRST       0xfffffffe
+#define RX_CLS_LOC_LAST                0xfffffffd
+
 /* Reset flags */
 /* The reset() operation must clear the flags for the components which
  * were actually reset.  On successful return, the flags indicate the
index 0c4df261af7e6a32d5b5c80633fa6a643e2d6285..e0bc4ffb8e7f0ec42a916219ab02f43a112609d1 100644 (file)
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
@@ -1886,6 +1886,7 @@ extern struct dentry *mount_single(struct file_system_type *fs_type,
 extern struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
 void generic_shutdown_super(struct super_block *sb);
 void kill_block_super(struct super_block *sb);
 void kill_anon_super(struct super_block *sb);
@@ -1941,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
index 96efa6794ea5293a59f40638d344005b2497f0bc..c3da42dd22baf17cbf853e6defaed7796bfba950 100644 (file)
@@ -172,6 +172,7 @@ enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_RECORDED_CMD_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
+       TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 };
 
 enum {
@@ -179,6 +180,7 @@ enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+       TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 };
 
 struct ftrace_event_call {
index 61549b26ad6f4dd114250ac00b170a1f734fd21b..73c28dea10ae395f1a7a7f4a517f174578dde313 100644 (file)
@@ -85,6 +85,30 @@ enum {
 /* All generic netlink requests are serialized by a global lock.  */
 extern void genl_lock(void);
 extern void genl_unlock(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_genl_is_held(void);
+#endif
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
+ * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
+ */
+#define rcu_dereference_genl(p)                                        \
+       rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds genl mutex.
+ */
+#define genl_dereference(p)                                    \
+       rcu_dereference_protected(p, lockdep_genl_is_held())
 
 #endif /* __KERNEL__ */
 
index 9de31bc98c8803bc96bac1f0751da3ca695bf8df..6d18f3531f180f401d35e8028b3395b339182c2e 100644 (file)
@@ -21,8 +21,6 @@
 #define dev_to_part(device)    container_of((device), struct hd_struct, __dev)
 #define disk_to_dev(disk)      (&(disk)->part0.__dev)
 #define part_to_dev(part)      (&((part)->__dev))
-#define alias_name(disk)       ((disk)->alias ? (disk)->alias : \
-                                                (disk)->disk_name)
 
 extern struct device_type part_type;
 extern struct kobject *block_depr;
@@ -60,7 +58,6 @@ enum {
 
 #define DISK_MAX_PARTS                 256
 #define DISK_NAME_LEN                  32
-#define ALIAS_LEN                      256
 
 #include <linux/major.h>
 #include <linux/device.h>
@@ -166,7 +163,6 @@ struct gendisk {
                                          * disks that can't be partitioned. */
 
        char disk_name[DISK_NAME_LEN];  /* name of major driver */
-       char *alias;                    /* alias name of disk */
        char *(*devnode)(struct gendisk *gd, mode_t *mode);
 
        unsigned int events;            /* supported events */
index 19644e0016bdbb4837e1fa8f28cd276797fd00a4..d9d6c868b86bc01226031d63ce5ee72eb44ebed6 100644 (file)
@@ -110,11 +110,6 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
-#ifndef HPAGE_MASK
-#define HPAGE_MASK     PAGE_MASK               /* Keep the compiler happy */
-#define HPAGE_SIZE     PAGE_SIZE
-#endif
-
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #define HUGETLB_ANON_FILE "anon_hugepage"
index a81bf6d23b3e6ad62333696032e0b49317115667..07d103a06d64a04e17e06a2b9011f773ef60d8f1 100644 (file)
@@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *);
 /* Internal numbers to terminate lists */
 #define I2C_CLIENT_END         0xfffeU
 
-/* The numbers to use to set I2C bus address */
-#define ANY_I2C_BUS            0xffff
-
 /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
 #define I2C_ADDRS(addr, addrs...) \
        ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
index db20bd4fd16b9189534a0cc15aa6335925cd3ace..06b6ef60c821a43be68135d8fb4055c2425ff441 100644 (file)
@@ -79,6 +79,7 @@
 #define IFF_TX_SKB_SHARING     0x10000 /* The interface supports sharing
                                         * skbs on transmit */
 #define IFF_UNICAST_FLT        0x20000         /* Supports unicast filtering   */
+#define IFF_TEAM_PORT  0x40000         /* device used as team port */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
 #define IF_GET_PROTO   0x0002
index e473003e4bdadc05fa07aa4380debaed666665e3..56d907a2c80478d4c2932a3856662b59af2fdc1f 100644 (file)
@@ -79,6 +79,7 @@
 #define ETH_P_PAE      0x888E          /* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE      0x88A2          /* ATA over Ethernet            */
 #define ETH_P_8021AD   0x88A8          /* 802.1ad Service VLAN         */
+#define ETH_P_802_EX1  0x88B5          /* 802.1 Local Experimental 1.  */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
 #define ETH_P_8021AH   0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_1588     0x88F7          /* IEEE 1588 Timesync */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644 (file)
index 0000000..828181f
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * include/linux/if_team.h - Network team device driver header
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_TEAM_H_
+#define _LINUX_IF_TEAM_H_
+
+#ifdef __KERNEL__
+
+struct team_pcpu_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_multicast;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+       u32                     rx_dropped;
+       u32                     tx_dropped;
+};
+
+struct team;
+
+struct team_port {
+       struct net_device *dev;
+       struct hlist_node hlist; /* node in hash list */
+       struct list_head list; /* node in ordinary list */
+       struct team *team;
+       int index;
+
+       /*
+        * A place for storing original values of the device before it
+        * become a port.
+        */
+       struct {
+               unsigned char dev_addr[MAX_ADDR_LEN];
+               unsigned int mtu;
+       } orig;
+
+       bool linkup;
+       u32 speed;
+       u8 duplex;
+
+       struct rcu_head rcu;
+};
+
+struct team_mode_ops {
+       int (*init)(struct team *team);
+       void (*exit)(struct team *team);
+       rx_handler_result_t (*receive)(struct team *team,
+                                      struct team_port *port,
+                                      struct sk_buff *skb);
+       bool (*transmit)(struct team *team, struct sk_buff *skb);
+       int (*port_enter)(struct team *team, struct team_port *port);
+       void (*port_leave)(struct team *team, struct team_port *port);
+       void (*port_change_mac)(struct team *team, struct team_port *port);
+};
+
+enum team_option_type {
+       TEAM_OPTION_TYPE_U32,
+       TEAM_OPTION_TYPE_STRING,
+};
+
+struct team_option {
+       struct list_head list;
+       const char *name;
+       enum team_option_type type;
+       int (*getter)(struct team *team, void *arg);
+       int (*setter)(struct team *team, void *arg);
+};
+
+struct team_mode {
+       struct list_head list;
+       const char *kind;
+       struct module *owner;
+       size_t priv_size;
+       const struct team_mode_ops *ops;
+};
+
+#define TEAM_PORT_HASHBITS 4
+#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
+
+#define TEAM_MODE_PRIV_LONGS 4
+#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
+
+struct team {
+       struct net_device *dev; /* associated netdevice */
+       struct team_pcpu_stats __percpu *pcpu_stats;
+
+       struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+       /*
+        * port lists with port count
+        */
+       int port_count;
+       struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
+       struct list_head port_list;
+
+       struct list_head option_list;
+
+       const struct team_mode *mode;
+       struct team_mode_ops ops;
+       long mode_priv[TEAM_MODE_PRIV_LONGS];
+};
+
+static inline struct hlist_head *team_port_index_hash(struct team *team,
+                                                     int port_index)
+{
+       return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+}
+
+static inline struct team_port *team_get_port_by_index(struct team *team,
+                                                      int port_index)
+{
+       struct hlist_node *p;
+       struct team_port *port;
+       struct hlist_head *head = team_port_index_hash(team, port_index);
+
+       hlist_for_each_entry(port, p, head, hlist)
+               if (port->index == port_index)
+                       return port;
+       return NULL;
+}
+static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
+                                                          int port_index)
+{
+       struct hlist_node *p;
+       struct team_port *port;
+       struct hlist_head *head = team_port_index_hash(team, port_index);
+
+       hlist_for_each_entry_rcu(port, p, head, hlist)
+               if (port->index == port_index)
+                       return port;
+       return NULL;
+}
+
+extern int team_port_set_team_mac(struct team_port *port);
+extern int team_options_register(struct team *team,
+                                const struct team_option *option,
+                                size_t option_count);
+extern void team_options_unregister(struct team *team,
+                                   const struct team_option *option,
+                                   size_t option_count);
+extern int team_mode_register(struct team_mode *mode);
+extern int team_mode_unregister(struct team_mode *mode);
+
+#endif /* __KERNEL__ */
+
+#define TEAM_STRING_MAX_LEN 32
+
+/**********************************
+ * NETLINK_GENERIC netlink family.
+ **********************************/
+
+enum {
+       TEAM_CMD_NOOP,
+       TEAM_CMD_OPTIONS_SET,
+       TEAM_CMD_OPTIONS_GET,
+       TEAM_CMD_PORT_LIST_GET,
+
+       __TEAM_CMD_MAX,
+       TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
+};
+
+enum {
+       TEAM_ATTR_UNSPEC,
+       TEAM_ATTR_TEAM_IFINDEX,         /* u32 */
+       TEAM_ATTR_LIST_OPTION,          /* nest */
+       TEAM_ATTR_LIST_PORT,            /* nest */
+
+       __TEAM_ATTR_MAX,
+       TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
+};
+
+/* Nested layout of get/set msg:
+ *
+ *     [TEAM_ATTR_LIST_OPTION]
+ *             [TEAM_ATTR_ITEM_OPTION]
+ *                     [TEAM_ATTR_OPTION_*], ...
+ *             [TEAM_ATTR_ITEM_OPTION]
+ *                     [TEAM_ATTR_OPTION_*], ...
+ *             ...
+ *     [TEAM_ATTR_LIST_PORT]
+ *             [TEAM_ATTR_ITEM_PORT]
+ *                     [TEAM_ATTR_PORT_*], ...
+ *             [TEAM_ATTR_ITEM_PORT]
+ *                     [TEAM_ATTR_PORT_*], ...
+ *             ...
+ */
+
+enum {
+       TEAM_ATTR_ITEM_OPTION_UNSPEC,
+       TEAM_ATTR_ITEM_OPTION,          /* nest */
+
+       __TEAM_ATTR_ITEM_OPTION_MAX,
+       TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_OPTION_UNSPEC,
+       TEAM_ATTR_OPTION_NAME,          /* string */
+       TEAM_ATTR_OPTION_CHANGED,       /* flag */
+       TEAM_ATTR_OPTION_TYPE,          /* u8 */
+       TEAM_ATTR_OPTION_DATA,          /* dynamic */
+
+       __TEAM_ATTR_OPTION_MAX,
+       TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_ITEM_PORT_UNSPEC,
+       TEAM_ATTR_ITEM_PORT,            /* nest */
+
+       __TEAM_ATTR_ITEM_PORT_MAX,
+       TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_PORT_UNSPEC,
+       TEAM_ATTR_PORT_IFINDEX,         /* u32 */
+       TEAM_ATTR_PORT_CHANGED,         /* flag */
+       TEAM_ATTR_PORT_LINKUP,          /* flag */
+       TEAM_ATTR_PORT_SPEED,           /* u32 */
+       TEAM_ATTR_PORT_DUPLEX,          /* u8 */
+
+       __TEAM_ATTR_PORT_MAX,
+       TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
+};
+
+/*
+ * NETLINK_GENERIC related info
+ */
+#define TEAM_GENL_NAME "team"
+#define TEAM_GENL_VERSION 0x1
+#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
+
+#endif /* _LINUX_IF_TEAM_H_ */
index 12d5543b14f25198d5754582ed49a6f0cc0913cb..13aff1e2183baeb7b2493b6d50cc75184d908ef1 100644 (file)
@@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-/* if this changes, algorithm will have to be reworked because this
- * depends on completely exhausting the VLAN identifier space.  Thus
- * it gives constant time look-up, but in many cases it wastes memory.
- */
-#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
-#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
-
-struct vlan_group {
-       struct net_device       *real_dev; /* The ethernet(like) device
-                                           * the vlan is attached to.
-                                           */
-       unsigned int            nr_vlans;
-       struct hlist_node       hlist;  /* linked list */
-       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
-       struct rcu_head         rcu;
-};
+struct vlan_info;
 
 static inline int is_vlan_dev(struct net_device *dev)
 {
@@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+                               const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+                                const struct net_device *by_dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
        return skb;
 }
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+                                      const struct net_device *by_dev)
+{
+       return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+                                       const struct net_device *by_dev)
+{
+}
 #endif
 
 /**
@@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 
        return protocol;
 }
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+                                       struct vlan_hdr *vhdr)
+{
+       __be16 proto;
+       unsigned char *rawp;
+
+       /*
+        * Was a VLAN packet, grab the encapsulated protocol, which the layer
+        * three protocols care about.
+        */
+
+       proto = vhdr->h_vlan_encapsulated_proto;
+       if (ntohs(proto) >= 1536) {
+               skb->protocol = proto;
+               return;
+       }
+
+       rawp = skb->data;
+       if (*(unsigned short *) rawp == 0xFFFF)
+               /*
+                * This is a magic hack to spot IPX packets. Older Novell
+                * breaks the protocol design and runs IPX over 802.3 without
+                * an 802.2 LLC layer. We look for FFFF which isn't a used
+                * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+                * but does for the rest.
+                */
+               skb->protocol = htons(ETH_P_802_3);
+       else
+               /*
+                * Real 802.2 LLC
+                */
+               skb->protocol = htons(ETH_P_802_2);
+}
 #endif /* __KERNEL__ */
 
 /* VLAN IOCTLs are found in sockios.h */
@@ -352,7 +398,7 @@ struct vlan_ioctl_args {
                unsigned int skb_priority;
                unsigned int name_type;
                unsigned int bind_type;
-               unsigned int flag; /* Matches vlan_dev_info flags */
+               unsigned int flag; /* Matches vlan_dev_priv flags */
         } u;
 
        short vlan_qos;   
index 80b480c97532f25801c952fef3d515df907b9c8e..34e8d52c192509ec3908bd74376410f7170a8628 100644 (file)
@@ -22,7 +22,7 @@ struct inet_diag_sockid {
 
 /* Request structure */
 
-struct inet_diag_req {
+struct inet_diag_req_compat {
        __u8    idiag_family;           /* Family of addresses. */
        __u8    idiag_src_len;
        __u8    idiag_dst_len;
@@ -34,6 +34,15 @@ struct inet_diag_req {
        __u32   idiag_dbs;              /* Tables to dump (NI) */
 };
 
+struct inet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u8    idiag_ext;
+       __u8    pad;
+       __u32   idiag_states;
+       struct inet_diag_sockid id;
+};
+
 enum {
        INET_DIAG_REQ_NONE,
        INET_DIAG_REQ_BYTECODE,
@@ -98,9 +107,11 @@ enum {
        INET_DIAG_VEGASINFO,
        INET_DIAG_CONG,
        INET_DIAG_TOS,
+       INET_DIAG_TCLASS,
+       INET_DIAG_SKMEMINFO,
 };
 
-#define INET_DIAG_MAX INET_DIAG_TOS
+#define INET_DIAG_MAX INET_DIAG_SKMEMINFO
 
 
 /* INET_DIAG_MEM */
@@ -124,16 +135,41 @@ struct tcpvegas_info {
 #ifdef __KERNEL__
 struct sock;
 struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
 
 struct inet_diag_handler {
-       struct inet_hashinfo    *idiag_hashinfo;
+       void                    (*dump)(struct sk_buff *skb,
+                                       struct netlink_callback *cb,
+                                       struct inet_diag_req *r,
+                                       struct nlattr *bc);
+
+       int                     (*dump_one)(struct sk_buff *in_skb,
+                                       const struct nlmsghdr *nlh,
+                                       struct inet_diag_req *req);
+
        void                    (*idiag_get_info)(struct sock *sk,
                                                  struct inet_diag_msg *r,
                                                  void *info);
-       __u16                   idiag_info_size;
        __u16                   idiag_type;
 };
 
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r,
+               struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+               struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* __KERNEL__ */
index 08ffab01e76c357cc47b378fd07f2895561f6cc3..32574eef93941bab73a9b43138cd8a67511101ec 100644 (file)
@@ -126,6 +126,8 @@ extern struct cred init_cred;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#define INIT_TASK_COMM "swapper"
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .comm           = "swapper",                                    \
+       .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
        .files          = &init_files,                                  \
@@ -184,7 +186,6 @@ extern struct cred init_cred;
                [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),            \
        },                                                              \
        .thread_group   = LIST_HEAD_INIT(tsk.thread_group),             \
-       .dirties = INIT_PROP_LOCAL_SINGLE(dirties),                     \
        INIT_IDS                                                        \
        INIT_PERF_EVENTS(tsk)                                           \
        INIT_TRACE_IRQFLAGS                                             \
index 0c997767429a79db596a25cb8611355fdb29c6d9..6318268dcaf5c93cef691ea922c5d598fcbda749 100644 (file)
@@ -404,7 +404,7 @@ struct tcp6_sock {
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
        return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #define inet6_rcv_saddr(__sk)  NULL
 #define tcp_twsk_ipv6only(__sk)                0
 #define inet_v6_ipv6only(__sk)         0
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   && \
index f47fcd30273dd2ce904b6ac0cf11fb963c04826c..68e67e50d028e95681f37e47ba98d2e047b01b8b 100644 (file)
@@ -555,9 +555,9 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA        65
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
-#define KVM_CAP_PPC_HIOR 67
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index f549056fb20bd5533555918cc1b1f9805c2cdcc3..87f402ccec55567330943ab774ffb12ae21c7da8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index ff9abff55aa0ebac51d86b2457f3d08e6e09428a..90b0656a869e319ba17b3537faffad1774ad2db7 100644 (file)
@@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
        return ipv4_is_loopback(sin->sin_addr.s_addr);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 
        return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
 }
-#else  /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else  /* IS_ENABLED(CONFIG_IPV6) */
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        return 0;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Ensure incoming requests are from local privileged callers.
index 25b808631cd92c50d10cf6a31b2d9b9942b62ac9..fd7ff3d91e6a920ff084beca09d10b5b9abba981 100644 (file)
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define rounddown_pow_of_two(n)                        \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 0 :                  \
                (1UL << ilog2(n))) :            \
        __rounddown_pow_of_two(n)               \
  )
index 0fe00cd4c93ca3b26fb8841b6becfd8ef10a02cd..76f52bbbb2f430b822524b4e60ef90fa05a4f476 100644 (file)
@@ -32,6 +32,8 @@ struct mdiobb_ops {
 
 struct mdiobb_ctrl {
        const struct mdiobb_ops *ops;
+       /* reset callback */
+       int (*reset)(struct mii_bus *bus);
 };
 
 /* The returned bus is not yet registered with the phy layer. */
index e9d3fdfe41d71a9dc42c1b456a928e93e049bbc7..7c9fe3c2be73a771d53fbf4208832203032cfa07 100644 (file)
@@ -20,6 +20,8 @@ struct mdio_gpio_platform_data {
 
        unsigned int phy_mask;
        int irqs[PHY_MAX_ADDR];
+       /* reset callback */
+       int (*reset)(struct mii_bus *bus);
 };
 
 #endif /* __LINUX_MDIO_GPIO_H */
index b87068a1a09ef84ece916a4a722ebf70e921d1c6..9b296ea41bb85a5543c915dad41748c59fdd1734 100644 (file)
@@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
@@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page)
 }
 #endif
 
+enum {
+       UNDER_LIMIT,
+       SOFT_LIMIT,
+       OVER_LIMIT,
+};
+
+#ifdef CONFIG_INET
+struct sock;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_INET */
 #endif /* _LINUX_MEMCONTROL_H */
 
index 82b4c8801a4fc673fc4c7c0451bdb11330c821c7..8bf2cb9502dd7ba492680d371279577392c6d3fe 100644 (file)
 
 
 /*Registers VDD1, VDD2 voltage values definitions */
-#define VDD1_2_NUM_VOLTS                               73
+#define VDD1_2_NUM_VOLT_FINE                           73
+#define VDD1_2_NUM_VOLT_COARSE                         3
 #define VDD1_2_MIN_VOLT                                        6000
 #define VDD1_2_OFFSET                                  125
 
index 27748230aa69440e9b6d728a61a2b9ac425ac9c0..2783eca629a04c69ef334021711e5fcba602e97d 100644 (file)
@@ -9,6 +9,7 @@
 #define __LINUX_MII_H__
 
 #include <linux/types.h>
+#include <linux/ethtool.h>
 
 /* Generic MII registers. */
 #define MII_BMCR               0x00    /* Basic mode control register */
@@ -239,6 +240,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
        return 0;
 }
 
+/**
+ * ethtool_adv_to_mii_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_10baseT_Half)
+               result |= ADVERTISE_10HALF;
+       if (ethadv & ADVERTISED_10baseT_Full)
+               result |= ADVERTISE_10FULL;
+       if (ethadv & ADVERTISED_100baseT_Half)
+               result |= ADVERTISE_100HALF;
+       if (ethadv & ADVERTISED_100baseT_Full)
+               result |= ADVERTISE_100FULL;
+       if (ethadv & ADVERTISED_Pause)
+               result |= ADVERTISE_PAUSE_CAP;
+       if (ethadv & ADVERTISED_Asym_Pause)
+               result |= ADVERTISE_PAUSE_ASYM;
+
+       return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_t
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to ethtool advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_10HALF)
+               result |= ADVERTISED_10baseT_Half;
+       if (adv & ADVERTISE_10FULL)
+               result |= ADVERTISED_10baseT_Full;
+       if (adv & ADVERTISE_100HALF)
+               result |= ADVERTISED_100baseT_Half;
+       if (adv & ADVERTISE_100FULL)
+               result |= ADVERTISED_100baseT_Full;
+       if (adv & ADVERTISE_PAUSE_CAP)
+               result |= ADVERTISED_Pause;
+       if (adv & ADVERTISE_PAUSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
+
+       return result;
+}
+
+/**
+ * ethtool_adv_to_mii_ctrl1000_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_1000baseT_Half)
+               result |= ADVERTISE_1000HALF;
+       if (ethadv & ADVERTISED_1000baseT_Full)
+               result |= ADVERTISE_1000FULL;
+
+       return result;
+}
+
+/**
+ * mii_ctrl1000_to_ethtool_adv_t
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_1000HALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (adv & ADVERTISE_1000FULL)
+               result |= ADVERTISED_1000baseT_Full;
+
+       return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_t
+ * @adv: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-T mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_LPACK)
+               result |= ADVERTISED_Autoneg;
+
+       return result | mii_adv_to_ethtool_adv_t(lpa);
+}
+
+/**
+ * mii_stat1000_to_ethtool_lpa_t
+ * @adv: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_1000HALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (lpa & LPA_1000FULL)
+               result |= ADVERTISED_1000baseT_Full;
+
+       return result;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_x
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000Base-X mode.
+ */
+static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_1000baseT_Half)
+               result |= ADVERTISE_1000XHALF;
+       if (ethadv & ADVERTISED_1000baseT_Full)
+               result |= ADVERTISE_1000XFULL;
+       if (ethadv & ADVERTISED_Pause)
+               result |= ADVERTISE_1000XPAUSE;
+       if (ethadv & ADVERTISED_Asym_Pause)
+               result |= ADVERTISE_1000XPSE_ASYM;
+
+       return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_x
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-X mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_1000XHALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (adv & ADVERTISE_1000XFULL)
+               result |= ADVERTISED_1000baseT_Full;
+       if (adv & ADVERTISE_1000XPAUSE)
+               result |= ADVERTISED_Pause;
+       if (adv & ADVERTISE_1000XPSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
+
+       return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_x
+ * @adv: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-X mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_LPACK)
+               result |= ADVERTISED_Autoneg;
+
+       return result | mii_adv_to_ethtool_adv_x(lpa);
+}
+
 /**
  * mii_advertise_flowctrl - get flow control advertisement flags
  * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
index b56e4587208d09cdf239f93c71e1ed92dc72a72e..9958ff2cad3c624f7f6c84457dffa71f217d5a44 100644 (file)
@@ -59,12 +59,15 @@ enum {
        MLX4_CMD_HW_HEALTH_CHECK = 0x50,
        MLX4_CMD_SET_PORT        = 0xc,
        MLX4_CMD_SET_NODE        = 0x5a,
+       MLX4_CMD_QUERY_FUNC      = 0x56,
        MLX4_CMD_ACCESS_DDR      = 0x2e,
        MLX4_CMD_MAP_ICM         = 0xffa,
        MLX4_CMD_UNMAP_ICM       = 0xff9,
        MLX4_CMD_MAP_ICM_AUX     = 0xffc,
        MLX4_CMD_UNMAP_ICM_AUX   = 0xffb,
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
+       /*master notify fw on finish for slave's flr*/
+       MLX4_CMD_INFORM_FLR_DONE = 0x5b,
 
        /* TPT commands */
        MLX4_CMD_SW2HW_MPT       = 0xd,
@@ -119,6 +122,26 @@ enum {
        /* miscellaneous commands */
        MLX4_CMD_DIAG_RPRT       = 0x30,
        MLX4_CMD_NOP             = 0x31,
+       MLX4_CMD_ACCESS_MEM      = 0x2e,
+       MLX4_CMD_SET_VEP         = 0x52,
+
+       /* Ethernet specific commands */
+       MLX4_CMD_SET_VLAN_FLTR   = 0x47,
+       MLX4_CMD_SET_MCAST_FLTR  = 0x48,
+       MLX4_CMD_DUMP_ETH_STATS  = 0x49,
+
+       /* Communication channel commands */
+       MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+       MLX4_CMD_GEN_EQE         = 0x58,
+
+       /* virtual commands */
+       MLX4_CMD_ALLOC_RES       = 0xf00,
+       MLX4_CMD_FREE_RES        = 0xf01,
+       MLX4_CMD_MCAST_ATTACH    = 0xf05,
+       MLX4_CMD_UCAST_ATTACH    = 0xf06,
+       MLX4_CMD_PROMISC         = 0xf08,
+       MLX4_CMD_QUERY_FUNC_CAP  = 0xf0a,
+       MLX4_CMD_QP_ATTACH       = 0xf0b,
 
        /* debug commands */
        MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@ enum {
 
        /* statistics commands */
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
+       MLX4_CMD_SET_IF_STAT     = 0X55,
 };
 
 enum {
@@ -135,7 +159,8 @@ enum {
 };
 
 enum {
-       MLX4_MAILBOX_SIZE       =  4096
+       MLX4_MAILBOX_SIZE       = 4096,
+       MLX4_ACCESS_MEM_ALIGN   = 256,
 };
 
 enum {
@@ -148,6 +173,11 @@ enum {
        MLX4_SET_PORT_GID_TABLE = 0x5,
 };
 
+enum {
+       MLX4_CMD_WRAPPED,
+       MLX4_CMD_NATIVE
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox {
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout);
+              u16 op, unsigned long timeout, int native);
 
 /* Invoke a command with no output parameter */
 static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
-                          u8 op_modifier, u16 op, unsigned long timeout)
+                          u8 op_modifier, u16 op, unsigned long timeout,
+                          int native)
 {
        return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /* Invoke a command with an output mailbox */
 static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /*
@@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param
  */
 static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
 
+u32 mlx4_comm_get_version(void);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+
 #endif /* MLX4_CMD_H */
index 84b0b1848f17180c3e14d264f017a17c4d041c79..5c4fe8e5bfe563669d9f2ef3b23eaa9ad14b1b7c 100644 (file)
@@ -47,6 +47,9 @@
 enum {
        MLX4_FLAG_MSI_X         = 1 << 0,
        MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
+       MLX4_FLAG_MASTER        = 1 << 2,
+       MLX4_FLAG_SLAVE         = 1 << 3,
+       MLX4_FLAG_SRIOV         = 1 << 4,
 };
 
 enum {
@@ -57,6 +60,15 @@ enum {
        MLX4_BOARD_ID_LEN = 64
 };
 
+enum {
+       MLX4_MAX_NUM_PF         = 16,
+       MLX4_MAX_NUM_VF         = 64,
+       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_EQ_NUM       = 4,
+       MLX4_MFUNC_MAX_EQES     = 8,
+       MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
 enum {
        MLX4_DEV_CAP_FLAG_RC            = 1LL <<  0,
        MLX4_DEV_CAP_FLAG_UC            = 1LL <<  1,
@@ -77,11 +89,13 @@ enum {
        MLX4_DEV_CAP_FLAG_IBOE          = 1LL << 30,
        MLX4_DEV_CAP_FLAG_UC_LOOPBACK   = 1LL << 32,
        MLX4_DEV_CAP_FLAG_FCS_KEEP      = 1LL << 34,
-       MLX4_DEV_CAP_FLAG_WOL           = 1LL << 38,
+       MLX4_DEV_CAP_FLAG_WOL_PORT1     = 1LL << 37,
+       MLX4_DEV_CAP_FLAG_WOL_PORT2     = 1LL << 38,
        MLX4_DEV_CAP_FLAG_UDP_RSS       = 1LL << 40,
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
-       MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48
+       MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48,
+       MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
 };
 
 #define MLX4_ATTR_EXTENDED_PORT_INFO   cpu_to_be16(0xff90)
@@ -116,7 +130,11 @@ enum mlx4_event {
        MLX4_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MLX4_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MLX4_EVENT_TYPE_ECC_DETECT         = 0x0e,
-       MLX4_EVENT_TYPE_CMD                = 0x0a
+       MLX4_EVENT_TYPE_CMD                = 0x0a,
+       MLX4_EVENT_TYPE_VEP_UPDATE         = 0x19,
+       MLX4_EVENT_TYPE_COMM_CHANNEL       = 0x18,
+       MLX4_EVENT_TYPE_FLR_EVENT          = 0x1c,
+       MLX4_EVENT_TYPE_NONE               = 0xff,
 };
 
 enum {
@@ -183,6 +201,7 @@ enum mlx4_qp_region {
 };
 
 enum mlx4_port_type {
+       MLX4_PORT_TYPE_NONE     = 0,
        MLX4_PORT_TYPE_IB       = 1,
        MLX4_PORT_TYPE_ETH      = 2,
        MLX4_PORT_TYPE_AUTO     = 3
@@ -215,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 
 struct mlx4_caps {
        u64                     fw_ver;
+       u32                     function;
        int                     num_ports;
        int                     vl_cap[MLX4_MAX_PORTS + 1];
        int                     ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -229,6 +249,7 @@ struct mlx4_caps {
        u64                     trans_code[MLX4_MAX_PORTS + 1];
        int                     local_ca_ack_delay;
        int                     num_uars;
+       u32                     uar_page_size;
        int                     bf_reg_size;
        int                     bf_regs_per_page;
        int                     max_sq_sg;
@@ -252,8 +273,7 @@ struct mlx4_caps {
        int                     num_comp_vectors;
        int                     comp_pool;
        int                     num_mpts;
-       int                     num_mtt_segs;
-       int                     mtts_per_seg;
+       int                     num_mtts;
        int                     fmr_reserved_mtts;
        int                     reserved_mtts;
        int                     reserved_mrws;
@@ -283,7 +303,9 @@ struct mlx4_caps {
        int                     log_num_prios;
        enum mlx4_port_type     port_type[MLX4_MAX_PORTS + 1];
        u8                      supported_type[MLX4_MAX_PORTS + 1];
-       u32                     port_mask;
+       u8                      suggested_type[MLX4_MAX_PORTS + 1];
+       u8                      default_sense[MLX4_MAX_PORTS + 1];
+       u32                     port_mask[MLX4_MAX_PORTS + 1];
        enum mlx4_port_type     possible_type[MLX4_MAX_PORTS + 1];
        u32                     max_counters;
        u8                      ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -303,7 +325,7 @@ struct mlx4_buf {
 };
 
 struct mlx4_mtt {
-       u32                     first_seg;
+       u32                     offset;
        int                     order;
        int                     page_shift;
 };
@@ -465,10 +487,12 @@ struct mlx4_counter {
 struct mlx4_dev {
        struct pci_dev         *pdev;
        unsigned long           flags;
+       unsigned long           num_slaves;
        struct mlx4_caps        caps;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
+       int                     num_vfs;
 };
 
 struct mlx4_init_port_param {
@@ -487,14 +511,32 @@ struct mlx4_init_port_param {
 
 #define mlx4_foreach_port(port, dev, type)                             \
        for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
-                    ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+               if ((type) == (dev)->caps.port_mask[(port)])
 
-#define mlx4_foreach_ib_transport_port(port, dev)                      \
-       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||      \
-                   ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define mlx4_foreach_ib_transport_port(port, dev)                         \
+       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)       \
+               if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+                       ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+       return (qpn < dev->caps.sqp_start + 8);
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+       return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_SLAVE;
+}
 
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf);
@@ -560,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       enum mlx4_protocol prot);
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol protocol);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -570,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index 48cc4cb9785856b5469e322250672b3e9a1a2449..bee8fa231276e2d29d7b4419b9ac92e8620fb2d8 100644 (file)
@@ -97,6 +97,33 @@ enum {
        MLX4_QP_BIT_RIC                         = 1 <<  4,
 };
 
+enum {
+       MLX4_RSS_HASH_XOR                       = 0,
+       MLX4_RSS_HASH_TOP                       = 1,
+
+       MLX4_RSS_UDP_IPV6                       = 1 << 0,
+       MLX4_RSS_UDP_IPV4                       = 1 << 1,
+       MLX4_RSS_TCP_IPV6                       = 1 << 2,
+       MLX4_RSS_IPV6                           = 1 << 3,
+       MLX4_RSS_TCP_IPV4                       = 1 << 4,
+       MLX4_RSS_IPV4                           = 1 << 5,
+
+       /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
+       MLX4_RSS_OFFSET_IN_QPC_PRI_PATH         = 0x24,
+       /* offset of being RSS indirection QP within mlx4_qp_context.flags */
+       MLX4_RSS_QPC_FLAG_OFFSET                = 13,
+};
+
+struct mlx4_rss_context {
+       __be32                  base_qpn;
+       __be32                  default_qpn;
+       u16                     reserved;
+       u8                      hash_fn;
+       u8                      flags;
+       __be32                  rss_key[10];
+       __be32                  base_qpn_udp;
+};
+
 struct mlx4_qp_path {
        u8                      fl;
        u8                      reserved1[2];
@@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg {
         * [4]   IP checksum
         * [3:2] C (generate completion queue entry)
         * [1]   SE (solicited event)
+        * [0]   FL (force loopback)
         */
        __be32                  srcrb_flags;
        /*
index 3dc3a8c2c4858a1d3400aa2d5fd029d36a1177c6..4baadd18f4ad3402f47fbd2ac919bafba519bed4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
index 415f2db414e17cd7c885b84e88b7b806746d3fe2..c8ef9bc54d500d0df052d4bd433e4b24d1855bb1 100644 (file)
@@ -218,6 +218,7 @@ struct mmc_card {
 #define MMC_QUIRK_INAND_CMD38  (1<<6)          /* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
        return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+       return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)       ((c)->cid.prod_name)
 #define mmc_card_id(c)         (dev_name(&(c)->dev))
 
index a7003b7a695d26ac366ae9204349c87ea358b0d3..b188f68a08c90bf8689ea784e618378aa5c9e978 100644 (file)
@@ -116,6 +116,7 @@ enum {
        NDTPA_PROXY_DELAY,              /* u64, msecs */
        NDTPA_PROXY_QLEN,               /* u32 */
        NDTPA_LOCKTIME,                 /* u64, msecs */
+       NDTPA_QUEUE_LENBYTES,           /* u32 */
        __NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644 (file)
index 0000000..77f5202
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Network device features.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NETDEV_FEATURES_H
+#define _LINUX_NETDEV_FEATURES_H
+
+#include <linux/types.h>
+
+typedef u64 netdev_features_t;
+
+enum {
+       NETIF_F_SG_BIT,                 /* Scatter/gather IO. */
+       NETIF_F_IP_CSUM_BIT,            /* Can checksum TCP/UDP over IPv4. */
+       __UNUSED_NETIF_F_1,
+       NETIF_F_HW_CSUM_BIT,            /* Can checksum all the packets. */
+       NETIF_F_IPV6_CSUM_BIT,          /* Can checksum TCP/UDP over IPV6 */
+       NETIF_F_HIGHDMA_BIT,            /* Can DMA to high memory. */
+       NETIF_F_FRAGLIST_BIT,           /* Scatter/gather IO. */
+       NETIF_F_HW_VLAN_TX_BIT,         /* Transmit VLAN hw acceleration */
+       NETIF_F_HW_VLAN_RX_BIT,         /* Receive VLAN hw acceleration */
+       NETIF_F_HW_VLAN_FILTER_BIT,     /* Receive filtering on VLAN */
+       NETIF_F_VLAN_CHALLENGED_BIT,    /* Device cannot handle VLAN packets */
+       NETIF_F_GSO_BIT,                /* Enable software GSO. */
+       NETIF_F_LLTX_BIT,               /* LockLess TX - deprecated. Please */
+                                       /* do not use LLTX in new drivers */
+       NETIF_F_NETNS_LOCAL_BIT,        /* Does not change network namespaces */
+       NETIF_F_GRO_BIT,                /* Generic receive offload */
+       NETIF_F_LRO_BIT,                /* large receive offload */
+
+       /**/NETIF_F_GSO_SHIFT,          /* keep the order of SKB_GSO_* bits */
+       NETIF_F_TSO_BIT                 /* ... TCPv4 segmentation */
+               = NETIF_F_GSO_SHIFT,
+       NETIF_F_UFO_BIT,                /* ... UDPv4 fragmentation */
+       NETIF_F_GSO_ROBUST_BIT,         /* ... ->SKB_GSO_DODGY */
+       NETIF_F_TSO_ECN_BIT,            /* ... TCP ECN support */
+       NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
+       NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
+       NETIF_F_GSO_RESERVED1,          /* ... free (fill GSO_MASK to 8 bits) */
+       /**/NETIF_F_GSO_LAST,           /* [can't be last bit, see GSO_MASK] */
+       NETIF_F_GSO_RESERVED2           /* ... free (fill GSO_MASK to 8 bits) */
+               = NETIF_F_GSO_LAST,
+
+       NETIF_F_FCOE_CRC_BIT,           /* FCoE CRC32 */
+       NETIF_F_SCTP_CSUM_BIT,          /* SCTP checksum offload */
+       NETIF_F_FCOE_MTU_BIT,           /* Supports max FCoE MTU, 2158 bytes*/
+       NETIF_F_NTUPLE_BIT,             /* N-tuple filters supported */
+       NETIF_F_RXHASH_BIT,             /* Receive hashing offload */
+       NETIF_F_RXCSUM_BIT,             /* Receive checksumming offload */
+       NETIF_F_NOCACHE_COPY_BIT,       /* Use no-cache copyfromuser */
+       NETIF_F_LOOPBACK_BIT,           /* Enable loopback */
+
+       /*
+        * Add your fresh new feature above and remember to update
+        * netdev_features_strings[] in net/core/ethtool.c and maybe
+        * some feature mask #defines below. Please also describe it
+        * in Documentation/networking/netdev-features.txt.
+        */
+
+       /**/NETDEV_FEATURE_COUNT
+};
+
+/* copy'n'paste compression ;) */
+#define __NETIF_F_BIT(bit)     ((netdev_features_t)1 << (bit))
+#define __NETIF_F(name)                __NETIF_F_BIT(NETIF_F_##name##_BIT)
+
+#define NETIF_F_FCOE_CRC       __NETIF_F(FCOE_CRC)
+#define NETIF_F_FCOE_MTU       __NETIF_F(FCOE_MTU)
+#define NETIF_F_FRAGLIST       __NETIF_F(FRAGLIST)
+#define NETIF_F_FSO            __NETIF_F(FSO)
+#define NETIF_F_GRO            __NETIF_F(GRO)
+#define NETIF_F_GSO            __NETIF_F(GSO)
+#define NETIF_F_GSO_ROBUST     __NETIF_F(GSO_ROBUST)
+#define NETIF_F_HIGHDMA                __NETIF_F(HIGHDMA)
+#define NETIF_F_HW_CSUM                __NETIF_F(HW_CSUM)
+#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER)
+#define NETIF_F_HW_VLAN_RX     __NETIF_F(HW_VLAN_RX)
+#define NETIF_F_HW_VLAN_TX     __NETIF_F(HW_VLAN_TX)
+#define NETIF_F_IP_CSUM                __NETIF_F(IP_CSUM)
+#define NETIF_F_IPV6_CSUM      __NETIF_F(IPV6_CSUM)
+#define NETIF_F_LLTX           __NETIF_F(LLTX)
+#define NETIF_F_LOOPBACK       __NETIF_F(LOOPBACK)
+#define NETIF_F_LRO            __NETIF_F(LRO)
+#define NETIF_F_NETNS_LOCAL    __NETIF_F(NETNS_LOCAL)
+#define NETIF_F_NOCACHE_COPY   __NETIF_F(NOCACHE_COPY)
+#define NETIF_F_NTUPLE         __NETIF_F(NTUPLE)
+#define NETIF_F_RXCSUM         __NETIF_F(RXCSUM)
+#define NETIF_F_RXHASH         __NETIF_F(RXHASH)
+#define NETIF_F_SCTP_CSUM      __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SG             __NETIF_F(SG)
+#define NETIF_F_TSO6           __NETIF_F(TSO6)
+#define NETIF_F_TSO_ECN                __NETIF_F(TSO_ECN)
+#define NETIF_F_TSO            __NETIF_F(TSO)
+#define NETIF_F_UFO            __NETIF_F(UFO)
+#define NETIF_F_VLAN_CHALLENGED        __NETIF_F(VLAN_CHALLENGED)
+
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE   (NETIF_F_VLAN_CHALLENGED | \
+                                NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+
+/* remember that ((t)1 << t_BITS) is undefined in C99 */
+#define NETIF_F_ETHTOOL_BITS   ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
+               (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
+               ~NETIF_F_NEVER_CHANGE)
+
+/* Segmentation offload feature mask */
+#define NETIF_F_GSO_MASK       (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
+               __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
+
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+                                NETIF_F_TSO6 | NETIF_F_UFO)
+
+#define NETIF_F_GEN_CSUM       NETIF_F_HW_CSUM
+#define NETIF_F_V4_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+
+#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+                                NETIF_F_FSO)
+
+/*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+                                NETIF_F_SG | NETIF_F_HIGHDMA |         \
+                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+/*
+ * If one device doesn't support one of these features, then disable it
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ALL_FOR_ALL    (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+
+#endif /* _LINUX_NETDEV_FEATURES_H */
index cbeb5867cff79d7d70952cc6285063c3feb92e7c..a776a675c0e5038d8a53931875dd48accd07276a 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/rculist.h>
 #include <linux/dmaengine.h>
 #include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
 
 #include <linux/ethtool.h>
 #include <net/net_namespace.h>
 #ifdef CONFIG_DCB
 #include <net/dcbnl.h>
 #endif
+#include <net/netprio_cgroup.h>
+
+#include <linux/netdev_features.h>
 
-struct vlan_group;
 struct netpoll_info;
 struct phy_device;
 /* 802.11 specific */
@@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc)
  *     used.
  */
 
-#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
 # if defined(CONFIG_MAC80211_MESH)
 #  define LL_MAX_HEADER 128
 # else
 #  define LL_MAX_HEADER 96
 # endif
-#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#elif IS_ENABLED(CONFIG_TR)
 # define LL_MAX_HEADER 48
 #else
 # define LL_MAX_HEADER 32
 #endif
 
-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
-    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
-    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
-    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
 #define MAX_HEADER LL_MAX_HEADER
 #else
 #define MAX_HEADER (LL_MAX_HEADER + 48)
@@ -212,6 +213,11 @@ enum {
 #include <linux/cache.h>
 #include <linux/skbuff.h>
 
+#ifdef CONFIG_RPS
+#include <linux/jump_label.h>
+extern struct jump_label_key rps_needed;
+#endif
+
 struct neighbour;
 struct neigh_parms;
 struct sk_buff;
@@ -272,16 +278,11 @@ struct hh_cache {
  *
  * We could use other alignment values, but we must maintain the
  * relationship HH alignment <= LL alignment.
- *
- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
- * may need.
  */
 #define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
-#define LL_ALLOCATED_SPACE(dev) \
-       ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif
 
 enum netdev_queue_state_t {
-       __QUEUE_STATE_XOFF,
+       __QUEUE_STATE_DRV_XOFF,
+       __QUEUE_STATE_STACK_XOFF,
        __QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)          | \
-                                   (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)            | \
+                             (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF           | \
+                                       (1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
+ * netif_tx_* functions below are used to manipulate this flag.  The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently.  The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state).  Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */
 
 struct netdev_queue {
 /*
@@ -528,9 +541,8 @@ struct netdev_queue {
  */
        struct net_device       *dev;
        struct Qdisc            *qdisc;
-       unsigned long           state;
        struct Qdisc            *qdisc_sleeping;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        struct kobject          kobj;
 #endif
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -545,6 +557,18 @@ struct netdev_queue {
         * please use this field instead of dev->trans_start
         */
        unsigned long           trans_start;
+
+       /*
+        * Number of TX timeouts for this queue
+        * (/sys/class/net/DEV/Q/trans_timeout)
+        */
+       unsigned long           trans_timeout;
+
+       unsigned long           state;
+
+#ifdef CONFIG_BQL
+       struct dql              dql;
+#endif
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -573,7 +597,7 @@ struct rps_map {
        struct rcu_head rcu;
        u16 cpus[0];
 };
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
 
 /*
  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -597,7 +621,7 @@ struct rps_dev_flow_table {
        struct rps_dev_flow flows[0];
 };
 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
-    (_num * sizeof(struct rps_dev_flow)))
+    ((_num) * sizeof(struct rps_dev_flow)))
 
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -608,7 +632,7 @@ struct rps_sock_flow_table {
        u16 ents[0];
 };
 #define        RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    (_num * sizeof(u16)))
+    ((_num) * sizeof(u16)))
 
 #define RPS_NO_CPU 0xffff
 
@@ -660,7 +684,7 @@ struct xps_map {
        struct rcu_head rcu;
        u16 queues[0];
 };
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))   \
     / sizeof(u16))
 
@@ -767,11 +791,11 @@ struct netdev_tc_txq {
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is registered.
  *
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is unregistered.
  *
@@ -845,12 +869,13 @@ struct netdev_tc_txq {
  *     Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
- * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *             netdev_features_t features);
  *     Adjusts the requested feature flags according to device-specific
  *     constraints, and returns the resulting flags. Must not modify
  *     the device state.
  *
- * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *     Called to update device configuration to new features. Passed
  *     feature set might be less than what was returned by ndo_fix_features()).
  *     Must return >0 or -errno if it changed dev->features itself.
@@ -885,9 +910,9 @@ struct net_device_ops {
                                                     struct rtnl_link_stats64 *storage);
        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-       void                    (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
-       void                    (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
@@ -912,7 +937,7 @@ struct net_device_ops {
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
        int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
        int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
@@ -927,7 +952,7 @@ struct net_device_ops {
                                                       unsigned int sgc);
 #endif
 
-#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
+#if IS_ENABLED(CONFIG_LIBFCOE)
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -944,10 +969,12 @@ struct net_device_ops {
                                                 struct net_device *slave_dev);
        int                     (*ndo_del_slave)(struct net_device *dev,
                                                 struct net_device *slave_dev);
-       u32                     (*ndo_fix_features)(struct net_device *dev,
-                                                   u32 features);
+       netdev_features_t       (*ndo_fix_features)(struct net_device *dev,
+                                                   netdev_features_t features);
        int                     (*ndo_set_features)(struct net_device *dev,
-                                                   u32 features);
+                                                   netdev_features_t features);
+       int                     (*ndo_neigh_construct)(struct neighbour *n);
+       void                    (*ndo_neigh_destroy)(struct neighbour *n);
 };
 
 /*
@@ -997,91 +1024,13 @@ struct net_device {
        struct list_head        unreg_list;
 
        /* currently active device features */
-       u32                     features;
+       netdev_features_t       features;
        /* user-changeable features */
-       u32                     hw_features;
+       netdev_features_t       hw_features;
        /* user-requested features */
-       u32                     wanted_features;
+       netdev_features_t       wanted_features;
        /* mask of features inheritable by VLAN devices */
-       u32                     vlan_features;
-
-       /* Net device feature bits; if you change something,
-        * also update netdev_features_strings[] in ethtool.c */
-
-#define NETIF_F_SG             1       /* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM                2       /* Can checksum TCP/UDP over IPv4. */
-#define NETIF_F_NO_CSUM                4       /* Does not require checksum. F.e. loopack. */
-#define NETIF_F_HW_CSUM                8       /* Can checksum all the packets. */
-#define NETIF_F_IPV6_CSUM      16      /* Can checksum TCP/UDP over IPV6 */
-#define NETIF_F_HIGHDMA                32      /* Can DMA to high memory. */
-#define NETIF_F_FRAGLIST       64      /* Scatter/gather IO. */
-#define NETIF_F_HW_VLAN_TX     128     /* Transmit VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_RX     256     /* Receive VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_FILTER 512     /* Receive filtering on VLAN */
-#define NETIF_F_VLAN_CHALLENGED        1024    /* Device cannot handle VLAN packets */
-#define NETIF_F_GSO            2048    /* Enable software GSO. */
-#define NETIF_F_LLTX           4096    /* LockLess TX - deprecated. Please */
-                                       /* do not use LLTX in new drivers */
-#define NETIF_F_NETNS_LOCAL    8192    /* Does not change network namespaces */
-#define NETIF_F_GRO            16384   /* Generic receive offload */
-#define NETIF_F_LRO            32768   /* large receive offload */
-
-/* the GSO_MASK reserves bits 16 through 23 */
-#define NETIF_F_FCOE_CRC       (1 << 24) /* FCoE CRC32 */
-#define NETIF_F_SCTP_CSUM      (1 << 25) /* SCTP checksum offload */
-#define NETIF_F_FCOE_MTU       (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
-#define NETIF_F_NTUPLE         (1 << 27) /* N-tuple filters supported */
-#define NETIF_F_RXHASH         (1 << 28) /* Receive hashing offload */
-#define NETIF_F_RXCSUM         (1 << 29) /* Receive checksumming offload */
-#define NETIF_F_NOCACHE_COPY   (1 << 30) /* Use no-cache copyfromuser */
-#define NETIF_F_LOOPBACK       (1 << 31) /* Enable loopback */
-
-       /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT      16
-#define NETIF_F_GSO_MASK       0x00ff0000
-#define NETIF_F_TSO            (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO            (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST     (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN                (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6           (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_FSO            (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
-
-       /* Features valid for ethtool to change */
-       /* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE   (NETIF_F_VLAN_CHALLENGED | \
-                                 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
-#define NETIF_F_ETHTOOL_BITS   (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
-
-       /* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
-                                NETIF_F_TSO6 | NETIF_F_UFO)
-
-
-#define NETIF_F_GEN_CSUM       (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_V4_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
-
-#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-#define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
-                                NETIF_F_FSO)
-
-       /*
-        * If one device supports one of these features, then enable them
-        * for all in netdev_increment_features.
-        */
-#define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
-                                NETIF_F_SG | NETIF_F_HIGHDMA |         \
-                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
-       /*
-        * If one device doesn't support one of these features, then disable it
-        * for all in netdev_increment_features.
-        */
-#define NETIF_F_ALL_FOR_ALL    (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
-
-       /* changeable features with no special hardware requirements */
-#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+       netdev_features_t       vlan_features;
 
        /* Interface index. Unique device identifier    */
        int                     ifindex;
@@ -1132,6 +1081,7 @@ struct net_device {
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
+       unsigned char           neigh_priv_len;
        unsigned short          dev_id;         /* for shared network cards */
 
        spinlock_t              addr_list_lock;
@@ -1144,11 +1094,11 @@ struct net_device {
 
        /* Protocol specific pointers */
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       struct vlan_group __rcu *vlgrp;         /* VLAN group */
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+       struct vlan_info __rcu  *vlan_info;     /* VLAN info */
 #endif
-#ifdef CONFIG_NET_DSA
-       void                    *dsa_ptr;       /* dsa specific data */
+#if IS_ENABLED(CONFIG_NET_DSA)
+       struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
 #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
@@ -1184,9 +1134,11 @@ struct net_device {
 
        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        struct kset             *queues_kset;
+#endif
 
+#ifdef CONFIG_RPS
        struct netdev_rx_queue  *_rx;
 
        /* Number of RX queues allocated at register_netdev() time */
@@ -1308,9 +1260,12 @@ struct net_device {
        struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
        u8 prio_tc_map[TC_BITMASK + 1];
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
+#endif
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+       struct netprio_map __rcu *priomap;
 #endif
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
@@ -1515,7 +1470,7 @@ struct packet_type {
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
-                                               u32 features);
+                                               netdev_features_t features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
@@ -1783,7 +1738,7 @@ extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-       if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                __netif_schedule(txq->qdisc);
 }
 
@@ -1797,7 +1752,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-       clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1829,7 +1784,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
                return;
        }
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
 }
 
@@ -1861,7 +1816,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
                pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
-       set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1888,7 +1843,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-       return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1902,9 +1857,68 @@ static inline int netif_queue_stopped(const struct net_device *dev)
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-       return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+                                       unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+       dql_queued(&dev_queue->dql, bytes);
+       if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
+               set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+               if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+                       clear_bit(__QUEUE_STATE_STACK_XOFF,
+                           &dev_queue->state);
+       }
+#endif
+}
+
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+       netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+                                            unsigned pkts, unsigned bytes)
+{
+#ifdef CONFIG_BQL
+       if (likely(bytes)) {
+               dql_completed(&dev_queue->dql, bytes);
+               if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
+                   &dev_queue->state) &&
+                   dql_avail(&dev_queue->dql) >= 0)) {
+                       if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
+                            &dev_queue->state))
+                               netif_schedule_queue(dev_queue);
+               }
+       }
+#endif
+}
+
+static inline void netdev_completed_queue(struct net_device *dev,
+                                         unsigned pkts, unsigned bytes)
+{
+       netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+       dql_reset(&q->dql);
+#endif
+}
+
+static inline void netdev_reset_queue(struct net_device *dev_queue)
+{
+       netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 }
 
 /**
@@ -1991,7 +2005,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
        if (netpoll_trap())
                return;
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
 }
 
@@ -2520,7 +2534,8 @@ extern int                netdev_set_master(struct net_device *dev, struct net_device *master)
 extern int netdev_set_bond_master(struct net_device *dev,
                                  struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2536,6 +2551,8 @@ extern void               net_disable_timestamp(void);
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+                           const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);
@@ -2547,11 +2564,13 @@ extern const char *netdev_drivername(const struct net_device *dev);
 
 extern void linkwatch_run_queue(void);
 
-static inline u32 netdev_get_wanted_features(struct net_device *dev)
+static inline netdev_features_t netdev_get_wanted_features(
+       struct net_device *dev)
 {
        return (dev->features & ~dev->hw_features) | dev->wanted_features;
 }
-u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+netdev_features_t netdev_increment_features(netdev_features_t all,
+       netdev_features_t one, netdev_features_t mask);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
@@ -2559,21 +2578,31 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-u32 netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(u32 features, int gso_type)
+static inline int net_gso_ok(netdev_features_t features, int gso_type)
 {
-       int feature = gso_type << NETIF_F_GSO_SHIFT;
+       netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+       /* check flags correspondence */
+       BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+
        return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
+static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 {
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct sk_buff *skb, int features)
+static inline int netif_needs_gso(struct sk_buff *skb,
+       netdev_features_t features)
 {
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@ -2592,22 +2621,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
-static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
-{
-       if (dev->features & NETIF_F_RXCSUM)
-               return 1;
-       if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
-               return 0;
-       return dev->ethtool_ops->get_rx_csum(dev);
-}
-
-static inline u32 dev_ethtool_get_flags(struct net_device *dev)
-{
-       if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
-               return 0;
-       return dev->ethtool_ops->get_flags(dev);
-}
-
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* netdev_printk helpers, similar to dev_printk */
index 857f5026ced65267ad77db343be0f486cb61a7d5..b809265607d0427ecfba54750af897b40609e5b6 100644 (file)
@@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
+#if defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+       if (__builtin_constant_p(pf) &&
+           __builtin_constant_p(hook))
+               return static_branch(&nf_hooks_needed[pf][hook]);
+
+       return !list_empty(&nf_hooks[pf][hook]);
+}
+#else
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+       return !list_empty(&nf_hooks[pf][hook]);
+}
+#endif
+
 int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
                 struct net_device *indev, struct net_device *outdev,
                 int (*okfn)(struct sk_buff *), int thresh);
@@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
                                 struct net_device *outdev,
                                 int (*okfn)(struct sk_buff *), int thresh)
 {
-#ifndef CONFIG_NETFILTER_DEBUG
-       if (list_empty(&nf_hooks[pf][hook]))
-               return 1;
-#endif
-       return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       if (nf_hooks_active(pf, hook))
+               return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       return 1;
 }
 
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
index a1b410c76fc3019bc7357961053601231a9e9da9..e144f54185c019a01d2210791e54f1feb12b1586 100644 (file)
@@ -5,7 +5,9 @@ header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
 header-y += nf_conntrack_tcp.h
 header-y += nf_conntrack_tuple_common.h
+header-y += nf_nat.h
 header-y += nfnetlink.h
+header-y += nfnetlink_acct.h
 header-y += nfnetlink_compat.h
 header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
@@ -21,6 +23,7 @@ header-y += xt_DSCP.h
 header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_MARK.h
+header-y += xt_nfacct.h
 header-y += xt_NFLOG.h
 header-y += xt_NFQUEUE.h
 header-y += xt_RATEEST.h
@@ -40,6 +43,7 @@ header-y += xt_cpu.h
 header-y += xt_dccp.h
 header-y += xt_devgroup.h
 header-y += xt_dscp.h
+header-y += xt_ecn.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
 header-y += xt_helper.h
index 0d3dd66322ecbb24529303f6634f36e5ce6f390d..9e3a2838291bfe6aee8f6b3e0d88b43ace455abf 100644 (file)
@@ -83,6 +83,10 @@ enum ip_conntrack_status {
        /* Conntrack is a fake untracked entry */
        IPS_UNTRACKED_BIT = 12,
        IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+
+       /* Conntrack has a userspace helper. */
+       IPS_USERSPACE_HELPER_BIT = 13,
+       IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
 };
 
 /* Connection tracking event types */
index 2ea22b018a874ae3b6356ebeb63cfc20f45ddbe3..2f6bbc5b812543de9f3afefdba2a7d67ba581a02 100644 (file)
@@ -7,6 +7,33 @@ enum ip_conntrack_dir {
        IP_CT_DIR_MAX
 };
 
+/* The protocol-specific manipulable parts of the tuple: always in
+ * network order
+ */
+union nf_conntrack_man_proto {
+       /* Add other protocols here. */
+       __be16 all;
+
+       struct {
+               __be16 port;
+       } tcp;
+       struct {
+               __be16 port;
+       } udp;
+       struct {
+               __be16 id;
+       } icmp;
+       struct {
+               __be16 port;
+       } dccp;
+       struct {
+               __be16 port;
+       } sctp;
+       struct {
+               __be16 key;     /* GRE key is 32bit, PPtP only uses 16bit */
+       } gre;
+};
+
 #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
 
 #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h
new file mode 100644 (file)
index 0000000..8df2d13
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _NETFILTER_NF_NAT_H
+#define _NETFILTER_NF_NAT_H
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_NAT_RANGE_MAP_IPS           1
+#define NF_NAT_RANGE_PROTO_SPECIFIED   2
+#define NF_NAT_RANGE_PROTO_RANDOM      4
+#define NF_NAT_RANGE_PERSISTENT                8
+
+struct nf_nat_ipv4_range {
+       unsigned int                    flags;
+       __be32                          min_ip;
+       __be32                          max_ip;
+       union nf_conntrack_man_proto    min;
+       union nf_conntrack_man_proto    max;
+};
+
+struct nf_nat_ipv4_multi_range_compat {
+       unsigned int                    rangesize;
+       struct nf_nat_ipv4_range        range[1];
+};
+
+#endif /* _NETFILTER_NF_NAT_H */
index 74d33861473ce5af0a3b6e7b4067d18d363db294..b64454c2f79f67aad7d18d4ff3168cbed7867b3e 100644 (file)
@@ -48,7 +48,8 @@ struct nfgenmsg {
 #define NFNL_SUBSYS_ULOG               4
 #define NFNL_SUBSYS_OSF                        5
 #define NFNL_SUBSYS_IPSET              6
-#define NFNL_SUBSYS_COUNT              7
+#define NFNL_SUBSYS_ACCT               7
+#define NFNL_SUBSYS_COUNT              8
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
new file mode 100644 (file)
index 0000000..7c4279b
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _NFNL_ACCT_H_
+#define _NFNL_ACCT_H_
+
+#ifndef NFACCT_NAME_MAX
+#define NFACCT_NAME_MAX                32
+#endif
+
+enum nfnl_acct_msg_types {
+       NFNL_MSG_ACCT_NEW,
+       NFNL_MSG_ACCT_GET,
+       NFNL_MSG_ACCT_GET_CTRZERO,
+       NFNL_MSG_ACCT_DEL,
+       NFNL_MSG_ACCT_MAX
+};
+
+enum nfnl_acct_type {
+       NFACCT_UNSPEC,
+       NFACCT_NAME,
+       NFACCT_PKTS,
+       NFACCT_BYTES,
+       NFACCT_USE,
+       __NFACCT_MAX
+};
+#define NFACCT_MAX (__NFACCT_MAX - 1)
+
+#ifdef __KERNEL__
+
+struct nf_acct;
+
+extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+extern void nfnl_acct_put(struct nf_acct *acct);
+extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+
+#endif /* __KERNEL__ */
+
+#endif /* _NFNL_ACCT_H */
index b56e76811c04380e9779dbe82c2cfa4a5b0c6abd..6390f0992f36f0723393d282c6d39d3f68abb12e 100644 (file)
@@ -3,7 +3,8 @@
 
 #include <linux/types.h>
 
-#define XT_CT_NOTRACK  0x1
+#define XT_CT_NOTRACK          0x1
+#define XT_CT_USERSPACE_HELPER 0x2
 
 struct xt_ct_target_info {
        __u16 flags;
diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h
new file mode 100644 (file)
index 0000000..7158fca
--- /dev/null
@@ -0,0 +1,35 @@
+/* iptables module for matching the ECN header in IPv4 and TCP header
+ *
+ * (C) 2002 Harald Welte <laforge@gnumonks.org>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ * 
+ * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
+*/
+#ifndef _XT_ECN_H
+#define _XT_ECN_H
+
+#include <linux/types.h>
+#include <linux/netfilter/xt_dscp.h>
+
+#define XT_ECN_IP_MASK (~XT_DSCP_MASK)
+
+#define XT_ECN_OP_MATCH_IP     0x01
+#define XT_ECN_OP_MATCH_ECE    0x10
+#define XT_ECN_OP_MATCH_CWR    0x20
+
+#define XT_ECN_OP_MATCH_MASK   0xce
+
+/* match info */
+struct xt_ecn_info {
+       __u8 operation;
+       __u8 invert;
+       __u8 ip_ect;
+       union {
+               struct {
+                       __u8 ect;
+               } tcp;
+       } proto;
+};
+
+#endif /* _XT_ECN_H */
diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h
new file mode 100644 (file)
index 0000000..3e19c8a
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _XT_NFACCT_MATCH_H
+#define _XT_NFACCT_MATCH_H
+
+#include <linux/netfilter/nfnetlink_acct.h>
+
+struct nf_acct;
+
+struct xt_nfacct_match_info {
+       char            name[NFACCT_NAME_MAX];
+       struct nf_acct  *nfacct;
+};
+
+#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h
new file mode 100644 (file)
index 0000000..8358d4f
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _XT_RPATH_H
+#define _XT_RPATH_H
+
+#include <linux/types.h>
+
+enum {
+       XT_RPFILTER_LOOSE = 1 << 0,
+       XT_RPFILTER_VALID_MARK = 1 << 1,
+       XT_RPFILTER_ACCEPT_LOCAL = 1 << 2,
+       XT_RPFILTER_INVERT = 1 << 3,
+#ifdef __KERNEL__
+       XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE |
+                                 XT_RPFILTER_VALID_MARK |
+                                 XT_RPFILTER_ACCEPT_LOCAL |
+                                 XT_RPFILTER_INVERT,
+#endif
+};
+
+struct xt_rpfilter_info {
+       __u8 flags;
+};
+
+#endif
index c3b45480ecf75e79c41b34e078732221204b0eec..f9930c87fff3a7beb35f558a788909ee1f27c5df 100644 (file)
@@ -12,4 +12,3 @@ header-y += ipt_ah.h
 header-y += ipt_ecn.h
 header-y += ipt_realm.h
 header-y += ipt_ttl.h
-header-y += nf_nat.h
index eabf95fb7d3e030c17a2f078ce8621e67d54b38a..0e0c063dbf60739173dae6d8d97cff16692cbf5d 100644 (file)
@@ -1,35 +1,15 @@
-/* iptables module for matching the ECN header in IPv4 and TCP header
- *
- * (C) 2002 Harald Welte <laforge@gnumonks.org>
- *
- * This software is distributed under GNU GPL v2, 1991
- * 
- * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
-*/
 #ifndef _IPT_ECN_H
 #define _IPT_ECN_H
 
-#include <linux/types.h>
-#include <linux/netfilter/xt_dscp.h>
+#include <linux/netfilter/xt_ecn.h>
+#define ipt_ecn_info xt_ecn_info
 
-#define IPT_ECN_IP_MASK        (~XT_DSCP_MASK)
-
-#define IPT_ECN_OP_MATCH_IP    0x01
-#define IPT_ECN_OP_MATCH_ECE   0x10
-#define IPT_ECN_OP_MATCH_CWR   0x20
-
-#define IPT_ECN_OP_MATCH_MASK  0xce
-
-/* match info */
-struct ipt_ecn_info {
-       __u8 operation;
-       __u8 invert;
-       __u8 ip_ect;
-       union {
-               struct {
-                       __u8 ect;
-               } tcp;
-       } proto;
+enum {
+       IPT_ECN_IP_MASK       = XT_ECN_IP_MASK,
+       IPT_ECN_OP_MATCH_IP   = XT_ECN_OP_MATCH_IP,
+       IPT_ECN_OP_MATCH_ECE  = XT_ECN_OP_MATCH_ECE,
+       IPT_ECN_OP_MATCH_CWR  = XT_ECN_OP_MATCH_CWR,
+       IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK,
 };
 
-#endif /* _IPT_ECN_H */
+#endif /* IPT_ECN_H */
diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h
deleted file mode 100644 (file)
index 7a861d0..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _LINUX_NF_NAT_H
-#define _LINUX_NF_NAT_H
-
-#include <linux/types.h>
-
-#define IP_NAT_RANGE_MAP_IPS 1
-#define IP_NAT_RANGE_PROTO_SPECIFIED 2
-#define IP_NAT_RANGE_PROTO_RANDOM 4
-#define IP_NAT_RANGE_PERSISTENT 8
-
-/* The protocol-specific manipulable parts of the tuple. */
-union nf_conntrack_man_proto {
-       /* Add other protocols here. */
-       __be16 all;
-
-       struct {
-               __be16 port;
-       } tcp;
-       struct {
-               __be16 port;
-       } udp;
-       struct {
-               __be16 id;
-       } icmp;
-       struct {
-               __be16 port;
-       } dccp;
-       struct {
-               __be16 port;
-       } sctp;
-       struct {
-               __be16 key;     /* GRE key is 32bit, PPtP only uses 16bit */
-       } gre;
-};
-
-/* Single range specification. */
-struct nf_nat_range {
-       /* Set to OR of flags above. */
-       unsigned int flags;
-
-       /* Inclusive: network order. */
-       __be32 min_ip, max_ip;
-
-       /* Inclusive: network order */
-       union nf_conntrack_man_proto min, max;
-};
-
-/* For backwards compat: don't use in modern code. */
-struct nf_nat_multi_range_compat {
-       unsigned int rangesize; /* Must be 1. */
-
-       /* hangs off end. */
-       struct nf_nat_range range[1];
-};
-
-#define nf_nat_multi_range nf_nat_multi_range_compat
-
-#endif
index 8374d29673625db129e01ba4a13b29761d769688..52e48959cfa166058a997e95aeb53a9e7bd23820 100644 (file)
@@ -8,7 +8,7 @@
 #define NETLINK_UNUSED         1       /* Unused number                                */
 #define NETLINK_USERSOCK       2       /* Reserved for user mode socket protocols      */
 #define NETLINK_FIREWALL       3       /* Firewalling hook                             */
-#define NETLINK_INET_DIAG      4       /* INET socket monitoring                       */
+#define NETLINK_SOCK_DIAG      4       /* socket monitoring                            */
 #define NETLINK_NFLOG          5       /* netfilter/iptables ULOG */
 #define NETLINK_XFRM           6       /* ipsec */
 #define NETLINK_SELINUX                7       /* SELinux event notifications */
@@ -27,6 +27,8 @@
 #define NETLINK_RDMA           20
 #define NETLINK_CRYPTO         21      /* Crypto layer */
 
+#define NETLINK_INET_DIAG      NETLINK_SOCK_DIAG
+
 #define MAX_LINKS 32           
 
 struct sockaddr_nl {
index ab2c6343361ae3e82f242df7880f292c189e442d..92ecf5585facf20511cd865db281d33ea828bdad 100644 (file)
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations;
 extern const struct inode_operations nfs3_file_inode_operations;
 #endif /* CONFIG_NFS_V3 */
 extern const struct file_operations nfs_file_operations;
+#ifdef CONFIG_NFS_V4
+extern const struct file_operations nfs4_file_operations;
+#endif /* CONFIG_NFS_V4 */
 extern const struct address_space_operations nfs_file_aops;
 extern const struct address_space_operations nfs_dir_aops;
 
index c74595ba70947926d2b1a99f4157ca7f485dbd13..2a7c533be5dd8aa2f584fde1faad94c558130d75 100644 (file)
@@ -1192,6 +1192,7 @@ struct nfs_rpc_ops {
        const struct dentry_operations *dentry_ops;
        const struct inode_operations *dir_inode_ops;
        const struct inode_operations *file_inode_ops;
+       const struct file_operations *file_ops;
 
        int     (*getroot) (struct nfs_server *, struct nfs_fh *,
                            struct nfs_fsinfo *);
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644 (file)
index 0000000..eb1efa5
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _LINUX_OPENVSWITCH_H
+#define _LINUX_OPENVSWITCH_H 1
+
+#include <linux/types.h>
+
+/**
+ * struct ovs_header - header for OVS Generic Netlink messages.
+ * @dp_ifindex: ifindex of local port for datapath (0 to make a request not
+ * specific to a datapath).
+ *
+ * Attributes following the header are specific to a particular OVS Generic
+ * Netlink family, but all of the OVS families use this header.
+ */
+
+struct ovs_header {
+       int dp_ifindex;
+};
+
+/* Datapaths. */
+
+#define OVS_DATAPATH_FAMILY  "ovs_datapath"
+#define OVS_DATAPATH_MCGROUP "ovs_datapath"
+#define OVS_DATAPATH_VERSION 0x1
+
+enum ovs_datapath_cmd {
+       OVS_DP_CMD_UNSPEC,
+       OVS_DP_CMD_NEW,
+       OVS_DP_CMD_DEL,
+       OVS_DP_CMD_GET,
+       OVS_DP_CMD_SET
+};
+
+/**
+ * enum ovs_datapath_attr - attributes for %OVS_DP_* commands.
+ * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local
+ * port".  This is the name of the network device whose dp_ifindex is given in
+ * the &struct ovs_header.  Always present in notifications.  Required in
+ * %OVS_DP_NEW requests.  May be used as an alternative to specifying
+ * dp_ifindex in other requests (with a dp_ifindex of 0).
+ * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially
+ * set on the datapath port (for OVS_ACTION_ATTR_MISS).  Only valid on
+ * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
+ * not be sent.
+ * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
+ * datapath.  Always present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_DP_* commands.
+ */
+enum ovs_datapath_attr {
+       OVS_DP_ATTR_UNSPEC,
+       OVS_DP_ATTR_NAME,       /* name of dp_ifindex netdev */
+       OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
+       OVS_DP_ATTR_STATS,      /* struct ovs_dp_stats */
+       __OVS_DP_ATTR_MAX
+};
+
+#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1)
+
+struct ovs_dp_stats {
+       __u64 n_hit;             /* Number of flow table matches. */
+       __u64 n_missed;          /* Number of flow table misses. */
+       __u64 n_lost;            /* Number of misses not sent to userspace. */
+       __u64 n_flows;           /* Number of flows present */
+};
+
+struct ovs_vport_stats {
+       __u64   rx_packets;             /* total packets received       */
+       __u64   tx_packets;             /* total packets transmitted    */
+       __u64   rx_bytes;               /* total bytes received         */
+       __u64   tx_bytes;               /* total bytes transmitted      */
+       __u64   rx_errors;              /* bad packets received         */
+       __u64   tx_errors;              /* packet transmit problems     */
+       __u64   rx_dropped;             /* no space in linux buffers    */
+       __u64   tx_dropped;             /* no space available in linux  */
+};
+
+/* Fixed logical ports. */
+#define OVSP_LOCAL      ((__u16)0)
+
+/* Packet transfer. */
+
+#define OVS_PACKET_FAMILY "ovs_packet"
+#define OVS_PACKET_VERSION 0x1
+
+enum ovs_packet_cmd {
+       OVS_PACKET_CMD_UNSPEC,
+
+       /* Kernel-to-user notifications. */
+       OVS_PACKET_CMD_MISS,    /* Flow table miss. */
+       OVS_PACKET_CMD_ACTION,  /* OVS_ACTION_ATTR_USERSPACE action. */
+
+       /* Userspace commands. */
+       OVS_PACKET_CMD_EXECUTE  /* Apply actions to a packet. */
+};
+
+/**
+ * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands.
+ * @OVS_PACKET_ATTR_PACKET: Present for all notifications.  Contains the entire
+ * packet as received, from the start of the Ethernet header onward.  For
+ * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by
+ * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is
+ * the flow key extracted from the packet as originally received.
+ * @OVS_PACKET_ATTR_KEY: Present for all notifications.  Contains the flow key
+ * extracted from the packet as nested %OVS_KEY_ATTR_* attributes.  This allows
+ * userspace to adapt its flow setup strategy by comparing its notion of the
+ * flow key against the kernel's.
+ * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet.  Used
+ * for %OVS_PACKET_CMD_EXECUTE.  It has nested %OVS_ACTION_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_USERDATA attribute.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_PACKET_* commands.
+ */
+enum ovs_packet_attr {
+       OVS_PACKET_ATTR_UNSPEC,
+       OVS_PACKET_ATTR_PACKET,      /* Packet data. */
+       OVS_PACKET_ATTR_KEY,         /* Nested OVS_KEY_ATTR_* attributes. */
+       OVS_PACKET_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+       OVS_PACKET_ATTR_USERDATA,    /* u64 OVS_ACTION_ATTR_USERSPACE arg. */
+       __OVS_PACKET_ATTR_MAX
+};
+
+#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1)
+
+/* Virtual ports. */
+
+#define OVS_VPORT_FAMILY  "ovs_vport"
+#define OVS_VPORT_MCGROUP "ovs_vport"
+#define OVS_VPORT_VERSION 0x1
+
+enum ovs_vport_cmd {
+       OVS_VPORT_CMD_UNSPEC,
+       OVS_VPORT_CMD_NEW,
+       OVS_VPORT_CMD_DEL,
+       OVS_VPORT_CMD_GET,
+       OVS_VPORT_CMD_SET
+};
+
+enum ovs_vport_type {
+       OVS_VPORT_TYPE_UNSPEC,
+       OVS_VPORT_TYPE_NETDEV,   /* network device */
+       OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
+       __OVS_VPORT_TYPE_MAX
+};
+
+#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1)
+
+/**
+ * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands.
+ * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath.
+ * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type
+ * of vport.
+ * @OVS_VPORT_ATTR_NAME: Name of vport.  For a vport based on a network device
+ * this is the name of the network device.  Maximum length %IFNAMSIZ-1 bytes
+ * plus a null terminator.
+ * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
+ * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
+ * this port.  A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
+ * packets sent or received through the vport.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_VPORT_* commands.
+ *
+ * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and
+ * %OVS_VPORT_ATTR_NAME attributes are required.  %OVS_VPORT_ATTR_PORT_NO is
+ * optional; if not specified a free port number is automatically selected.
+ * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
+ * of vport.
+ * and other attributes are ignored.
+ *
+ * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
+ * look up the vport to operate on; otherwise dp_idx from the &struct
+ * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
+ */
+enum ovs_vport_attr {
+       OVS_VPORT_ATTR_UNSPEC,
+       OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */
+       OVS_VPORT_ATTR_TYPE,    /* u32 OVS_VPORT_TYPE_* constant. */
+       OVS_VPORT_ATTR_NAME,    /* string name, up to IFNAMSIZ bytes long */
+       OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
+       OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+       OVS_VPORT_ATTR_STATS,   /* struct ovs_vport_stats */
+       __OVS_VPORT_ATTR_MAX
+};
+
+#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+
+/* Flows. */
+
+#define OVS_FLOW_FAMILY  "ovs_flow"
+#define OVS_FLOW_MCGROUP "ovs_flow"
+#define OVS_FLOW_VERSION 0x1
+
+enum ovs_flow_cmd {
+       OVS_FLOW_CMD_UNSPEC,
+       OVS_FLOW_CMD_NEW,
+       OVS_FLOW_CMD_DEL,
+       OVS_FLOW_CMD_GET,
+       OVS_FLOW_CMD_SET
+};
+
+struct ovs_flow_stats {
+       __u64 n_packets;         /* Number of matched packets. */
+       __u64 n_bytes;           /* Number of matched bytes. */
+};
+
+enum ovs_key_attr {
+       OVS_KEY_ATTR_UNSPEC,
+       OVS_KEY_ATTR_ENCAP,     /* Nested set of encapsulated attributes. */
+       OVS_KEY_ATTR_PRIORITY,  /* u32 skb->priority */
+       OVS_KEY_ATTR_IN_PORT,   /* u32 OVS dp port number */
+       OVS_KEY_ATTR_ETHERNET,  /* struct ovs_key_ethernet */
+       OVS_KEY_ATTR_VLAN,      /* be16 VLAN TCI */
+       OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */
+       OVS_KEY_ATTR_IPV4,      /* struct ovs_key_ipv4 */
+       OVS_KEY_ATTR_IPV6,      /* struct ovs_key_ipv6 */
+       OVS_KEY_ATTR_TCP,       /* struct ovs_key_tcp */
+       OVS_KEY_ATTR_UDP,       /* struct ovs_key_udp */
+       OVS_KEY_ATTR_ICMP,      /* struct ovs_key_icmp */
+       OVS_KEY_ATTR_ICMPV6,    /* struct ovs_key_icmpv6 */
+       OVS_KEY_ATTR_ARP,       /* struct ovs_key_arp */
+       OVS_KEY_ATTR_ND,        /* struct ovs_key_nd */
+       __OVS_KEY_ATTR_MAX
+};
+
+#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
+
+/**
+ * enum ovs_frag_type - IPv4 and IPv6 fragment type
+ * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
+ * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0.
+ * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset.
+ *
+ * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in
+ * &struct ovs_key_ipv6.
+ */
+enum ovs_frag_type {
+       OVS_FRAG_TYPE_NONE,
+       OVS_FRAG_TYPE_FIRST,
+       OVS_FRAG_TYPE_LATER,
+       __OVS_FRAG_TYPE_MAX
+};
+
+#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1)
+
+struct ovs_key_ethernet {
+       __u8     eth_src[6];
+       __u8     eth_dst[6];
+};
+
+struct ovs_key_ipv4 {
+       __be32 ipv4_src;
+       __be32 ipv4_dst;
+       __u8   ipv4_proto;
+       __u8   ipv4_tos;
+       __u8   ipv4_ttl;
+       __u8   ipv4_frag;       /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_ipv6 {
+       __be32 ipv6_src[4];
+       __be32 ipv6_dst[4];
+       __be32 ipv6_label;      /* 20-bits in least-significant bits. */
+       __u8   ipv6_proto;
+       __u8   ipv6_tclass;
+       __u8   ipv6_hlimit;
+       __u8   ipv6_frag;       /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_tcp {
+       __be16 tcp_src;
+       __be16 tcp_dst;
+};
+
+struct ovs_key_udp {
+       __be16 udp_src;
+       __be16 udp_dst;
+};
+
+struct ovs_key_icmp {
+       __u8 icmp_type;
+       __u8 icmp_code;
+};
+
+struct ovs_key_icmpv6 {
+       __u8 icmpv6_type;
+       __u8 icmpv6_code;
+};
+
+struct ovs_key_arp {
+       __be32 arp_sip;
+       __be32 arp_tip;
+       __be16 arp_op;
+       __u8   arp_sha[6];
+       __u8   arp_tha[6];
+};
+
+struct ovs_key_nd {
+       __u32 nd_target[4];
+       __u8  nd_sll[6];
+       __u8  nd_tll[6];
+};
+
+/**
+ * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
+ * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
+ * key.  Always present in notifications.  Required for all requests (except
+ * dumps).
+ * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
+ * the actions to take for packets that match the key.  Always present in
+ * notifications.  Required for %OVS_FLOW_CMD_NEW requests, optional for
+ * %OVS_FLOW_CMD_SET requests.
+ * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
+ * flow.  Present in notifications if the stats would be nonzero.  Ignored in
+ * requests.
+ * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the
+ * TCP flags seen on packets in this flow.  Only present in notifications for
+ * TCP flows, and only if it would be nonzero.  Ignored in requests.
+ * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on
+ * the system monotonic clock, at which a packet was last processed for this
+ * flow.  Only present in notifications if a packet has been processed for this
+ * flow.  Ignored in requests.
+ * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
+ * last-used time, accumulated TCP flags, and statistics for this flow.
+ * Otherwise ignored in requests.  Never present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_FLOW_* commands.
+ */
+enum ovs_flow_attr {
+       OVS_FLOW_ATTR_UNSPEC,
+       OVS_FLOW_ATTR_KEY,       /* Sequence of OVS_KEY_ATTR_* attributes. */
+       OVS_FLOW_ATTR_ACTIONS,   /* Nested OVS_ACTION_ATTR_* attributes. */
+       OVS_FLOW_ATTR_STATS,     /* struct ovs_flow_stats. */
+       OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
+       OVS_FLOW_ATTR_USED,      /* u64 msecs last used in monotonic time. */
+       OVS_FLOW_ATTR_CLEAR,     /* Flag to clear stats, tcp_flags, used. */
+       __OVS_FLOW_ATTR_MAX
+};
+
+#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
+
+/**
+ * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
+ * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
+ * @OVS_ACTION_ATTR_SAMPLE.  A value of 0 samples no packets, a value of
+ * %UINT32_MAX samples all packets and intermediate values sample intermediate
+ * fractions of packets.
+ * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
+ * Actions are passed as nested attributes.
+ *
+ * Executes the specified actions with the given probability on a per-packet
+ * basis.
+ */
+enum ovs_sample_attr {
+       OVS_SAMPLE_ATTR_UNSPEC,
+       OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
+       OVS_SAMPLE_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+       __OVS_SAMPLE_ATTR_MAX,
+};
+
+#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
+
+/**
+ * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
+ * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
+ * message should be sent.  Required.
+ * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the
+ * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ */
+enum ovs_userspace_attr {
+       OVS_USERSPACE_ATTR_UNSPEC,
+       OVS_USERSPACE_ATTR_PID,       /* u32 Netlink PID to receive upcalls. */
+       OVS_USERSPACE_ATTR_USERDATA,  /* u64 optional user-specified cookie. */
+       __OVS_USERSPACE_ATTR_MAX
+};
+
+#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
+
+/**
+ * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
+ * @vlan_tpid: Tag protocol identifier (TPID) to push.
+ * @vlan_tci: Tag control identifier (TCI) to push.  The CFI bit must be set
+ * (but it will not be set in the 802.1Q header that is pushed).
+ *
+ * The @vlan_tpid value is typically %ETH_P_8021Q.  The only acceptable TPID
+ * values are those that the kernel module also parses as 802.1Q headers, to
+ * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
+ * from having surprising results.
+ */
+struct ovs_action_push_vlan {
+       __be16 vlan_tpid;       /* 802.1Q TPID. */
+       __be16 vlan_tci;        /* 802.1Q TCI (VLAN ID and priority). */
+};
+
+/**
+ * enum ovs_action_attr - Action types.
+ *
+ * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
+ * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
+ * %OVS_USERSPACE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header.  The
+ * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
+ * value.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
+ * packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
+ * the nested %OVS_SAMPLE_ATTR_* attributes.
+ *
+ * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
+ * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
+ * type may not be changed.
+ */
+
+enum ovs_action_attr {
+       OVS_ACTION_ATTR_UNSPEC,
+       OVS_ACTION_ATTR_OUTPUT,       /* u32 port number. */
+       OVS_ACTION_ATTR_USERSPACE,    /* Nested OVS_USERSPACE_ATTR_*. */
+       OVS_ACTION_ATTR_SET,          /* One nested OVS_KEY_ATTR_*. */
+       OVS_ACTION_ATTR_PUSH_VLAN,    /* struct ovs_action_push_vlan. */
+       OVS_ACTION_ATTR_POP_VLAN,     /* No argument. */
+       OVS_ACTION_ATTR_SAMPLE,       /* Nested OVS_SAMPLE_ATTR_*. */
+       __OVS_ACTION_ATTR_MAX
+};
+
+#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
+
+#endif /* _LINUX_OPENVSWITCH_H */
index e3d0b3890249163881eec330894cdd8bb945295a..7ef68724f0f054f88427d2dbc18c63b962a16394 100644 (file)
@@ -12,7 +12,7 @@ struct pci_ats {
        unsigned int is_enabled:1;      /* Enable bit is set */
 };
 
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
 
 extern int pci_enable_ats(struct pci_dev *dev, int ps);
 extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
        return dev->ats && dev->ats->is_enabled;
 }
 
-#else /* CONFIG_PCI_IOV */
+#else /* CONFIG_PCI_ATS */
 
 static inline int pci_enable_ats(struct pci_dev *dev, int ps)
 {
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
        return 0;
 }
 
-#endif /* CONFIG_PCI_IOV */
+#endif /* CONFIG_PCI_ATS */
 
 #ifdef CONFIG_PCI_PRI
 
index 337df0d5d5f7eb2e80410e4e0980afe4ef4ed83a..7cda65b5f79806005af9aada8831f8403a519db7 100644 (file)
@@ -338,7 +338,7 @@ struct pci_dev {
        struct list_head msi_list;
 #endif
        struct pci_vpd *vpd;
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
        union {
                struct pci_sriov *sriov;        /* SR-IOV capability related */
                struct pci_dev *physfn; /* the PF this VF is associated with */
index 172ba70306d1e77a4591a0474ad58c1df9460796..2aaee0ca9da847ec447abc59df56c916f18bc6ac 100644 (file)
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0    0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1    0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2    0x1602
 #define PCI_DEVICE_ID_AMD_15H_NB_F3    0x1603
 #define PCI_DEVICE_ID_AMD_15H_NB_F4    0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5    0x1605
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 1e9ebe5e0091e7fa1b1fabb72f72af979f35104c..b1f89122bf6a820102f43714fd42246d1dadd207 100644 (file)
@@ -822,6 +822,7 @@ struct perf_event {
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
+       struct list_head                rb_entry;
 
        /* poll related */
        wait_queue_head_t               waitq;
index f53a4167c5f41bb2d9ba9a30fdecc3805f97a2df..f48bfc80cb4bfc7736173f6687550867ed0fc882 100644 (file)
@@ -38,6 +38,7 @@
 #define PNPIPE_ENCAP           1
 #define PNPIPE_IFINDEX         2
 #define PNPIPE_HANDLE          3
+#define PNPIPE_INITSTATE       4
 
 #define PNADDR_ANY             0
 #define PNADDR_BROADCAST       0xFC
@@ -49,6 +50,7 @@
 
 /* ioctls */
 #define SIOCPNGETOBJECT                (SIOCPROTOPRIVATE + 0)
+#define SIOCPNENABLEPIPE       (SIOCPROTOPRIVATE + 13)
 #define SIOCPNADDRESOURCE      (SIOCPROTOPRIVATE + 14)
 #define SIOCPNDELRESOURCE      (SIOCPROTOPRIVATE + 15)
 
index c5336705921fdae6a3feb8ea99dfa252ae012c52..8daced32a014f4db9e8a3bd445030e68a0bdfcc5 100644 (file)
@@ -30,7 +30,7 @@
  */
 
 struct tc_stats {
-       __u64   bytes;                  /* NUmber of enqueues bytes */
+       __u64   bytes;                  /* Number of enqueued bytes */
        __u32   packets;                /* Number of enqueued packets   */
        __u32   drops;                  /* Packets dropped because of lack of resources */
        __u32   overlimits;             /* Number of throttle events when this
@@ -181,6 +181,7 @@ enum {
        TCA_RED_UNSPEC,
        TCA_RED_PARMS,
        TCA_RED_STAB,
+       TCA_RED_MAX_P,
        __TCA_RED_MAX,
 };
 
@@ -194,8 +195,9 @@ struct tc_red_qopt {
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;
-#define TC_RED_ECN     1
-#define TC_RED_HARDDROP        2
+#define TC_RED_ECN             1
+#define TC_RED_HARDDROP                2
+#define TC_RED_ADAPTATIVE      4
 };
 
 struct tc_red_xstats {
@@ -214,6 +216,7 @@ enum {
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
        TCA_GRED_DPS,
+       TCA_GRED_MAX_P,
           __TCA_GRED_MAX,
 };
 
@@ -253,6 +256,7 @@ enum {
        TCA_CHOKE_UNSPEC,
        TCA_CHOKE_PARMS,
        TCA_CHOKE_STAB,
+       TCA_CHOKE_MAX_P,
        __TCA_CHOKE_MAX,
 };
 
@@ -297,7 +301,7 @@ struct tc_htb_glob {
        __u32 debug;            /* debug flags */
 
        /* stats */
-       __u32 direct_pkts; /* count of non shapped packets */
+       __u32 direct_pkts; /* count of non shaped packets */
 };
 enum {
        TCA_HTB_UNSPEC,
@@ -465,6 +469,7 @@ enum {
        TCA_NETEM_REORDER,
        TCA_NETEM_CORRUPT,
        TCA_NETEM_LOSS,
+       TCA_NETEM_RATE,
        __TCA_NETEM_MAX,
 };
 
@@ -495,6 +500,13 @@ struct tc_netem_corrupt {
        __u32   correlation;
 };
 
+struct tc_netem_rate {
+       __u32   rate;   /* byte/s */
+       __s32   packet_overhead;
+       __u32   cell_size;
+       __s32   cell_overhead;
+};
+
 enum {
        NETEM_LOSS_UNSPEC,
        NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
@@ -503,7 +515,7 @@ enum {
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
        __u32   p13;
        __u32   p31;
index f15acb64681351b72c5b1c460bf0174ad2f33749..3f3ed83a9aa52cf69df6fd7a925d521cbca96f6d 100644 (file)
@@ -54,118 +54,145 @@ typedef struct pm_message {
 /**
  * struct dev_pm_ops - device PM callbacks
  *
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
  * the state of pending I/O queues and (for drivers that touch hardware)
  * interrupts, wakeups, DMA, and other hardware state.  There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
  * to the rest of the driver stack (such as a driver that's ON gating off
  * clocks which are not in active use).
  *
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- *     its hardware state.  Prevent new children of the device from being
- *     registered after @prepare() returns (the driver's subsystem and
- *     generally the rest of the kernel is supposed to prevent new calls to the
- *     probe method from being made too once @prepare() has succeeded).  If
- *     @prepare() detects a situation it cannot handle (e.g. registration of a
- *     child already in progress), it may return -EAGAIN, so that the PM core
- *     can execute it once again (e.g. after the new child has been registered)
- *     to recover from the race condition.  This method is executed for all
- *     kinds of suspend transitions and is followed by one of the suspend
- *     callbacks: @suspend(), @freeze(), or @poweroff().
- *     The PM core executes @prepare() for all devices before starting to
- *     execute suspend callbacks for any of them, so drivers may assume all of
- *     the other devices to be present and functional while @prepare() is being
- *     executed.  In particular, it is safe to make GFP_KERNEL memory
- *     allocations from within @prepare().  However, drivers may NOT assume
- *     anything about the availability of the user space at that time and it
- *     is not correct to request firmware from within @prepare() (it's too
- *     late to do that).  [To work around this limitation, drivers may
- *     register suspend and hibernation notifiers that are executed before the
- *     freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved.  First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types.  They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that.  If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ *     the device from being registered after it has returned (the driver's
+ *     subsystem and generally the rest of the kernel is supposed to prevent
+ *     new calls to the probe method from being made too once @prepare() has
+ *     succeeded).  If @prepare() detects a situation it cannot handle (e.g.
+ *     registration of a child already in progress), it may return -EAGAIN, so
+ *     that the PM core can execute it once again (e.g. after a new child has
+ *     been registered) to recover from the race condition.
+ *     This method is executed for all kinds of suspend transitions and is
+ *     followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ *     @poweroff().  The PM core executes subsystem-level @prepare() for all
+ *     devices before starting to invoke suspend callbacks for any of them, so
+ *     generally devices may be assumed to be functional or to respond to
+ *     runtime resume requests while @prepare() is being executed.  However,
+ *     device drivers may NOT assume anything about the availability of user
+ *     space at that time and it is NOT valid to request firmware from within
+ *     @prepare() (it's too late to do that).  It also is NOT valid to allocate
+ *     substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ *     [To work around these limitations, drivers may register suspend and
+ *     hibernation notifiers to be executed before the freezing of tasks.]
  *
  * @complete: Undo the changes made by @prepare().  This method is executed for
  *     all kinds of resume transitions, following one of the resume callbacks:
  *     @resume(), @thaw(), @restore().  Also called if the state transition
- *     fails before the driver's suspend callback (@suspend(), @freeze(),
- *     @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ *     fails before the driver's suspend callback: @suspend(), @freeze() or
+ *     @poweroff(), can be executed (e.g. if the suspend callback fails for one
  *     of the other devices that the PM core has unsuccessfully attempted to
  *     suspend earlier).
- *     The PM core executes @complete() after it has executed the appropriate
- *     resume callback for all devices.
+ *     The PM core executes subsystem-level @complete() after it has executed
+ *     the appropriate resume callbacks for all devices.
  *
  * @suspend: Executed before putting the system into a sleep state in which the
- *     contents of main memory are preserved.  Quiesce the device, put it into
- *     a low power state appropriate for the upcoming system state (such as
- *     PCI_D3hot), and enable wakeup events as appropriate.
+ *     contents of main memory are preserved.  The exact action to perform
+ *     depends on the device's subsystem (PM domain, device type, class or bus
+ *     type), but generally the device must be quiescent after subsystem-level
+ *     @suspend() has returned, so that it doesn't do any I/O or DMA.
+ *     Subsystem-level @suspend() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @resume: Executed after waking the system up from a sleep state in which the
- *     contents of main memory were preserved.  Put the device into the
- *     appropriate state, according to the information saved in memory by the
- *     preceding @suspend().  The driver starts working again, responding to
- *     hardware events and software requests.  The hardware may have gone
- *     through a power-off reset, or it may have maintained state from the
- *     previous suspend() which the driver may rely on while resuming.  On most
- *     platforms, there are no restrictions on availability of resources like
- *     clocks during @resume().
+ *     contents of main memory were preserved.  The exact action to perform
+ *     depends on the device's subsystem, but generally the driver is expected
+ *     to start working again, responding to hardware events and software
+ *     requests (the device itself may be left in a low-power state, waiting
+ *     for a runtime resume to occur).  The state of the device at the time its
+ *     driver's @resume() callback is run depends on the platform and subsystem
+ *     the device belongs to.  On most platforms, there are no restrictions on
+ *     availability of resources like clocks during @resume().
+ *     Subsystem-level @resume() is executed for all devices after invoking
+ *     subsystem-level @resume_noirq() for all of them.
  *
  * @freeze: Hibernation-specific, executed before creating a hibernation image.
- *     Quiesce operations so that a consistent image can be created, but do NOT
- *     otherwise put the device into a low power device state and do NOT emit
- *     system wakeup events.  Save in main memory the device settings to be
- *     used by @restore() during the subsequent resume from hibernation or by
- *     the subsequent @thaw(), if the creation of the image or the restoration
- *     of main memory contents from it fails.
+ *     Analogous to @suspend(), but it should not enable the device to signal
+ *     wakeup events or change its power state.  The majority of subsystems
+ *     (with the notable exception of the PCI bus type) expect the driver-level
+ *     @freeze() to save the device settings in memory to be used by @restore()
+ *     during the subsequent resume from hibernation.
+ *     Subsystem-level @freeze() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @thaw: Hibernation-specific, executed after creating a hibernation image OR
- *     if the creation of the image fails.  Also executed after a failing
+ *     if the creation of an image has failed.  Also executed after a failing
  *     attempt to restore the contents of main memory from such an image.
  *     Undo the changes made by the preceding @freeze(), so the device can be
  *     operated in the same way as immediately before the call to @freeze().
+ *     Subsystem-level @thaw() is executed for all devices after invoking
+ *     subsystem-level @thaw_noirq() for all of them.  It also may be executed
+ *     directly after @freeze() in case of a transition error.
  *
  * @poweroff: Hibernation-specific, executed after saving a hibernation image.
- *     Quiesce the device, put it into a low power state appropriate for the
- *     upcoming system state (such as PCI_D3hot), and enable wakeup events as
- *     appropriate.
+ *     Analogous to @suspend(), but it need not save the device's settings in
+ *     memory.
+ *     Subsystem-level @poweroff() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @restore: Hibernation-specific, executed after restoring the contents of main
- *     memory from a hibernation image.  Driver starts working again,
- *     responding to hardware events and software requests.  Drivers may NOT
- *     make ANY assumptions about the hardware state right prior to @restore().
- *     On most platforms, there are no restrictions on availability of
- *     resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- *     actions required for suspending the device that need interrupts to be
- *     disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- *     actions required for resuming the device that need interrupts to be
- *     disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- *     actions required for freezing the device that need interrupts to be
- *     disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- *     actions required for thawing the device that need interrupts to be
- *     disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- *     actions required for handling the device that need interrupts to be
- *     disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- *     actions required for restoring the operations of the device that need
- *     interrupts to be disabled
+ *     memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
+ *     additional operations required for suspending the device that might be
+ *     racing with its driver's interrupt handler, which is guaranteed not to
+ *     run while @suspend_noirq() is being executed.
+ *     It generally is expected that the device will be in a low-power state
+ *     (appropriate for the target system sleep state) after subsystem-level
+ *     @suspend_noirq() has returned successfully.  If the device can generate
+ *     system wakeup signals and is enabled to wake up the system, it should be
+ *     configured to do so at that time.  However, depending on the platform
+ *     and device's subsystem, @suspend() may be allowed to put the device into
+ *     the low-power state and configure it to generate wakeup signals, in
+ *     which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ *     operations required for resuming the device that might be racing with
+ *     its driver's interrupt handler, which is guaranteed not to run while
+ *     @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
+ *     additional operations required for freezing the device that might be
+ *     racing with its driver's interrupt handler, which is guaranteed not to
+ *     run while @freeze_noirq() is being executed.
+ *     The power state of the device should not be changed by either @freeze()
+ *     or @freeze_noirq() and it should not be configured to signal system
+ *     wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ *     operations required for thawing the device that might be racing with its
+ *     driver's interrupt handler, which is guaranteed not to run while
+ *     @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
+ *     @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ *     operations required for thawing the device that might be racing with its
+ *     driver's interrupt handler, which is guaranteed not to run while
+ *     @restore_noirq() is being executed.  Analogous to @resume_noirq().
  *
  * All of the above callbacks, except for @complete(), return error codes.
  * However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
  * not cause the PM core to abort the resume transition during which they are
- * returned.  The error codes returned in that cases are only printed by the PM
+ * returned.  The error codes returned in those cases are only printed by the PM
  * core to the system logs for debugging purposes.  Still, it is recommended
  * that drivers only return error codes from their resume methods in case of an
  * unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
  * their children.
  *
  * It is allowed to unregister devices while the above callbacks are being
- * executed.  However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed.  However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
  *
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
  *
  * @runtime_suspend: Prepare the device for a condition in which it won't be
  *     able to communicate with the CPU(s) and RAM due to power management.
- *     This need not mean that the device should be put into a low power state.
+ *     This need not mean that the device should be put into a low-power state.
  *     For example, if the device is behind a link which is about to be turned
  *     off, the device may remain at full power.  If the device does go to low
- *     power and is capable of generating run-time wake-up events, remote
- *     wake-up (i.e., a hardware mechanism allowing the device to request a
- *     change of its power state via a wake-up event, such as PCI PME) should
- *     be enabled for it.
+ *     power and is capable of generating runtime wakeup events, remote wakeup
+ *     (i.e., a hardware mechanism allowing the device to request a change of
+ *     its power state via an interrupt) should be enabled for it.
  *
  * @runtime_resume: Put the device into the fully active state in response to a
- *     wake-up event generated by hardware or at the request of software.  If
- *     necessary, put the device into the full power state and restore its
+ *     wakeup event generated by hardware or at the request of software.  If
+ *     necessary, put the device into the full-power state and restore its
  *     registers, so that it is fully operational.
  *
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- *     power state if all of the necessary conditions are satisfied.  Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ *     low-power state if all of the necessary conditions are satisfied.  Check
  *     these conditions and handle the device as appropriate, possibly queueing
  *     a suspend request for it.  The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
  */
 
 struct dev_pm_ops {
@@ -447,6 +486,7 @@ struct dev_pm_info {
        unsigned int            async_suspend:1;
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
+       bool                    ignore_children:1;
        spinlock_t              lock;
 #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
@@ -464,7 +504,6 @@ struct dev_pm_info {
        atomic_t                usage_count;
        atomic_t                child_count;
        unsigned int            disable_depth:3;
-       unsigned int            ignore_children:1;
        unsigned int            idle_notification:1;
        unsigned int            request_pending:1;
        unsigned int            deferred_resume:1;
index d8d90361964216f771ed62e5d17fb083451e2796..d3085e72a0ee9b43bbd024bf33b93939e1535d42 100644 (file)
@@ -52,11 +52,6 @@ static inline bool pm_children_suspended(struct device *dev)
                || !atomic_read(&dev->power.child_count);
 }
 
-static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
-{
-       dev->power.ignore_children = enable;
-}
-
 static inline void pm_runtime_get_noresume(struct device *dev)
 {
        atomic_inc(&dev->power.usage_count);
@@ -130,7 +125,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
 static inline void pm_runtime_forbid(struct device *dev) {}
 
 static inline bool pm_children_suspended(struct device *dev) { return false; }
-static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
 static inline void pm_runtime_get_noresume(struct device *dev) {}
 static inline void pm_runtime_put_noidle(struct device *dev) {}
 static inline bool device_run_wake(struct device *dev) { return false; }
index ea567321ae3c3b2ac02967680f99de1687f99982..2ca8cde5459d3445b2897c02d58ac2d12dba5bba 100644 (file)
@@ -35,10 +35,12 @@ struct pstore_info {
        spinlock_t      buf_lock;       /* serialize access to 'buf' */
        char            *buf;
        size_t          bufsize;
+       struct mutex    read_mutex;     /* serialize open/read/close */
        int             (*open)(struct pstore_info *psi);
        int             (*close)(struct pstore_info *psi);
        ssize_t         (*read)(u64 *id, enum pstore_type_id *type,
-                       struct timespec *time, struct pstore_info *psi);
+                       struct timespec *time, char **buf,
+                       struct pstore_info *psi);
        int             (*write)(enum pstore_type_id type, u64 *id,
                        unsigned int part, size_t size, struct pstore_info *psi);
        int             (*erase)(enum pstore_type_id type, u64 id,
index 68daf4f27e2c337102258d71450eabc5a14178dd..1c4f3e9b9bc50e52ea57623f5b886691fb52db14 100644 (file)
@@ -1521,7 +1521,6 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
 #endif
-       struct prop_local_single dirties;
        /*
         * when (nr_dirtied >= nr_dirtied_pause), it's time to call
         * balance_dirty_pages() for some dirty throttling pause
index 19d8e04e16884c2bfb860fe2f72715306753ea15..e8c619d39291b4c18b028224299d882dbf56aa16 100644 (file)
@@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode,
                                                   char **name, void **value,
                                                   size_t *len)
 {
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static inline int security_inode_create(struct inode *dir,
index 97ff8e27a6cccfdc0678211db90b1fcde056f1da..3d86517fe7d5bf71f16efa468cec3fb9b4485c17 100644 (file)
@@ -207,13 +207,15 @@ struct serial_icounter_struct {
 
 struct serial_rs485 {
        __u32   flags;                  /* RS485 feature flags */
-#define SER_RS485_ENABLED              (1 << 0)
-#define SER_RS485_RTS_ON_SEND          (1 << 1)
-#define SER_RS485_RTS_AFTER_SEND       (1 << 2)
-#define SER_RS485_RTS_BEFORE_SEND      (1 << 3)
+#define SER_RS485_ENABLED              (1 << 0)        /* If enabled */
+#define SER_RS485_RTS_ON_SEND          (1 << 1)        /* Logical level for
+                                                          RTS pin when
+                                                          sending */
+#define SER_RS485_RTS_AFTER_SEND       (1 << 2)        /* Logical level for
+                                                          RTS pin after sent*/
 #define SER_RS485_RX_DURING_TX         (1 << 4)
-       __u32   delay_rts_before_send;  /* Milliseconds */
-       __u32   delay_rts_after_send;   /* Milliseconds */
+       __u32   delay_rts_before_send;  /* Delay before send (milliseconds) */
+       __u32   delay_rts_after_send;   /* Delay after send (milliseconds) */
        __u32   padding[5];             /* Memory is cheap, new structs
                                           are a royal PITA .. */
 };
index a83833a1f7a26f589a7584252ff413333286f5ea..07ceb97d53facc505bae4b489d3cae96bd7d4021 100644 (file)
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
index e2accb3164d8d969245e4567b0f308d4a35cb152..d0de882c0d96d5277f23306bf1d2884c40807292 100644 (file)
@@ -24,7 +24,7 @@ struct sigma_firmware {
 struct sigma_firmware_header {
        unsigned char magic[7];
        u8 version;
-       u32 crc;
+       __le32 crc;
 };
 
 enum {
@@ -40,19 +40,14 @@ enum {
 struct sigma_action {
        u8 instr;
        u8 len_hi;
-       u16 len;
-       u16 addr;
+       __le16 len;
+       __be16 addr;
        unsigned char payload[];
 };
 
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
-       return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
-       return sizeof(*sa) + payload_len + (payload_len % 2);
+       return (sa->len_hi << 16) | le16_to_cpu(sa->len);
 }
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index f444264e3612eb4c1f0e90d6ab2cb681b7061ac7..f47f0c3939f2b3214101d3e8aa8719b2d38de358 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
  *     at device setup time.
  *     NETIF_F_HW_CSUM - it is clever device, it is able to checksum
  *                       everything.
- *     NETIF_F_NO_CSUM - loopback or reliable single hop media.
  *     NETIF_F_IP_CSUM - device is dumb. It is able to csum only
  *                       TCP/UDP over IPv4. Sigh. Vendors like this
  *                       way by an unknown reason. Though, see comment above
@@ -128,13 +128,17 @@ struct sk_buff_head {
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list. Since
- * GRO uses frags we allocate at least 16 regardless of page size.
+/* To allow 64K frame to be packed as single skb without frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
  */
-#if (65536/PAGE_SIZE + 2) < 16
+#if (65536/PAGE_SIZE + 1) < 16
 #define MAX_SKB_FRAGS 16UL
 #else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
 
 typedef struct skb_frag_struct skb_frag_t;
@@ -546,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb);
 extern void           __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
                                   gfp_t priority, int fclone, int node);
+extern struct sk_buff *build_skb(void *data);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
@@ -567,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
                                gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-                                gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+                                int headroom, gfp_t gfp_mask);
+
 extern int            pskb_expand_head(struct sk_buff *skb,
                                        int nhead, int ntail,
                                        gfp_t gfp_mask);
@@ -1667,38 +1673,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
-/**
- *     __netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *     @gfp_mask: alloc_pages_node mask
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-       return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- *     netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-       return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-       __free_page(page);
-}
-
 /**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment
@@ -1830,6 +1804,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                            frag->page_offset + offset, size, dir);
 }
 
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+                                       gfp_t gfp_mask)
+{
+       return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
 /**
  *     skb_clone_writable - is the header of a clone writable
  *     @skb: buffer to check
@@ -2111,7 +2091,8 @@ extern void              skb_split(struct sk_buff *skb,
 extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb,
+                                  netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
new file mode 100644 (file)
index 0000000..ce718cb
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __LINUX_SMSCPHY_H__
+#define __LINUX_SMSCPHY_H__
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM  30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+       (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+        MII_LAN83C185_ISF_INT7)
+
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#define MII_LAN83C185_ENERGYON  (1 << 1)  /* ENERGYON */
+
+#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644 (file)
index 0000000..251729a
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+
+#include <linux/types.h>
+
+#define SOCK_DIAG_BY_FAMILY 20
+
+struct sock_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+};
+
+enum {
+       SK_MEMINFO_RMEM_ALLOC,
+       SK_MEMINFO_RCVBUF,
+       SK_MEMINFO_WMEM_ALLOC,
+       SK_MEMINFO_SNDBUF,
+       SK_MEMINFO_FWD_ALLOC,
+       SK_MEMINFO_WMEM_QUEUED,
+       SK_MEMINFO_OPTMEM,
+
+       SK_MEMINFO_VARS,
+};
+
+#ifdef __KERNEL__
+struct sk_buff;
+struct nlmsghdr;
+struct sock;
+
+struct sock_diag_handler {
+       __u8 family;
+       int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(struct sock_diag_handler *h);
+void sock_diag_unregister(struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie);
+void sock_diag_save_cookie(void *sk, __u32 *cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+
+extern struct sock *sock_diag_nlsk;
+#endif /* KERNEL */
+#endif
index 3d8f9c44e27d8a161daed9f8bcdd50591d906991..2c5993a17c3315423cbb3ba895fb8f24f4ac0bb3 100644 (file)
@@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
        return true;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
        struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
 
        dsin6->sin6_family = ssin6->sin6_family;
-       ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr);
+       dsin6->sin6_addr = ssin6->sin6_addr;
        return true;
 }
-#else  /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#else  /* !(IS_ENABLED(CONFIG_IPV6) */
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
 {
        return false;
 }
-#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#endif /* !(IS_ENABLED(CONFIG_IPV6) */
 
 /**
  * rpc_cmp_addr - compare the address portion of two sockaddrs.
index 7f59ee94698336fb32be6985e9f791d8b5149dfc..46a85c9e1f25bebddd3a1887b30fb182dcf5b5e4 100644 (file)
@@ -238,6 +238,11 @@ struct tcp_sack_block {
        u32     end_seq;
 };
 
+/*These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
+#define TCP_FACK_ENABLED  (1 << 1)   /*1 = FACK is enabled locally*/
+#define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
+
 struct tcp_options_received {
 /*     PAWS/RTTM data  */
        long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
new file mode 100644 (file)
index 0000000..b1d2bf1
--- /dev/null
@@ -0,0 +1,54 @@
+#ifndef __UNIX_DIAG_H__
+#define __UNIX_DIAG_H__
+
+#include <linux/types.h>
+
+struct unix_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u16   pad;
+       __u32   udiag_states;
+       __u32   udiag_ino;
+       __u32   udiag_show;
+       __u32   udiag_cookie[2];
+};
+
+#define UDIAG_SHOW_NAME                0x00000001      /* show name (not path) */
+#define UDIAG_SHOW_VFS         0x00000002      /* show VFS inode info */
+#define UDIAG_SHOW_PEER                0x00000004      /* show peer socket info */
+#define UDIAG_SHOW_ICONS       0x00000008      /* show pending connections */
+#define UDIAG_SHOW_RQLEN       0x00000010      /* show skb receive queue len */
+#define UDIAG_SHOW_MEMINFO     0x00000020      /* show memory info of a socket */
+
+struct unix_diag_msg {
+       __u8    udiag_family;
+       __u8    udiag_type;
+       __u8    udiag_state;
+       __u8    pad;
+
+       __u32   udiag_ino;
+       __u32   udiag_cookie[2];
+};
+
+enum {
+       UNIX_DIAG_NAME,
+       UNIX_DIAG_VFS,
+       UNIX_DIAG_PEER,
+       UNIX_DIAG_ICONS,
+       UNIX_DIAG_RQLEN,
+       UNIX_DIAG_MEMINFO,
+
+       UNIX_DIAG_MAX,
+};
+
+struct unix_diag_vfs {
+       __u32   udiag_vfs_ino;
+       __u32   udiag_vfs_dev;
+};
+
+struct unix_diag_rqlen {
+       __u32   udiag_rqueue;
+       __u32   udiag_wqueue;
+};
+
+#endif
index add4790b21fe4ee18635db9456a43db71bb39997..5206d6541da5726bde95c96e2ab8fa4e63be2ceb 100644 (file)
@@ -85,6 +85,8 @@
  * @reset: reset the device
  *     vdev: the virtio device
  *     After this, status and feature negotiation must be done again
+ *     Device must not be reset from its vq/config callbacks, or in
+ *     parallel with being added/removed.
  * @find_vqs: find virtqueues and instantiate them.
  *     vdev: the virtio_device
  *     nvqs: the number of virtqueues to find
  *     vdev: the virtio_device
  *     This gives the final feature bits for the device: it can change
  *     the dev->feature bits if it wants.
+ * @bus_name: return the bus name associated with the device
+ *     vdev: the virtio_device
+ *      This returns a pointer to the bus name a la pci_name from which
+ *      the caller can then copy.
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
@@ -117,6 +123,7 @@ struct virtio_config_ops {
        void (*del_vqs)(struct virtio_device *);
        u32 (*get_features)(struct virtio_device *vdev);
        void (*finalize_features)(struct virtio_device *vdev);
+       const char *(*bus_name)(struct virtio_device *vdev);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
@@ -182,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                return ERR_PTR(err);
        return vq;
 }
+
+static inline
+const char *virtio_bus_name(struct virtio_device *vdev)
+{
+       if (!vdev->config->bus_name)
+               return "virtio";
+       return vdev->config->bus_name(vdev);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_VIRTIO_CONFIG_H */
index 27c7edefbc86ea6cad445f3503de9970b1088bf7..5c7b6f0daef8f1f228c6daed124e589acdfdb649 100644 (file)
@@ -63,7 +63,7 @@
 #define VIRTIO_MMIO_GUEST_FEATURES     0x020
 
 /* Activated features set selector - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024
+#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
 
 /* Guest's memory page size in bytes - Write Only */
 #define VIRTIO_MMIO_GUEST_PAGE_SIZE    0x028
index 687fb11e20107d9c22467e1923d5e736bde16d3e..4bde182fcf93f23bb8f1b8b68d2a087ba20ae2c3 100644 (file)
@@ -119,7 +119,7 @@ unmap_kernel_range(unsigned long addr, unsigned long size)
 #endif
 
 /* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size);
+extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
 extern void free_vm_area(struct vm_struct *area);
 
 /* for /dev/kmem */
index b1377b931eb7e4c28b8852936af55d08ef40693c..5fb2c3d10c05047a4c50fb2e4440e7862a6a9908 100644 (file)
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
 static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        return icd ? icd->vdev : NULL;
 }
 
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
        return container_of(vq, struct soc_camera_device, vb_vidq);
 }
 
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+       return (icd->iface << 8) | (icd->devnum + 1);
+}
+
 void soc_camera_lock(struct vb2_queue *vq);
 void soc_camera_unlock(struct vb2_queue *vq);
 
index cbc6bb0a68386da58ec539fe19fa3cc7356fdfdc..f68dce2d8d88f806a162ebce3a67714e856bbcb2 100644 (file)
@@ -151,7 +151,8 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev,
                               const struct in6_addr *src_addr);
 extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
 
-extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len);
+extern void addrconf_prefix_rcv(struct net_device *dev,
+                               u8 *opt, int len, bool sllao);
 
 /*
  *     anycast prototypes (anycast.c)
index 91ab5b01678a4eb1603ae3cea1c90d28a121e348..5a4e29b168c9ad666949dd0b0850388a443c5793 100644 (file)
@@ -11,10 +11,13 @@ extern void unix_notinflight(struct file *fp);
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
 extern struct sock *unix_get_socket(struct file *filp);
+extern struct sock *unix_peer_get(struct sock *);
 
 #define UNIX_HASH_SIZE 256
 
 extern unsigned int unix_tot_inflight;
+extern spinlock_t unix_table_lock;
+extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 
 struct unix_address {
        atomic_t        refcnt;
@@ -63,6 +66,9 @@ struct unix_sock {
 
 #define peer_wait peer_wq.wait
 
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
 #ifdef CONFIG_SYSCTL
 extern int unix_sysctl_register(struct net *net);
 extern void unix_sysctl_unregister(struct net *net);
index 4979af8b15599adcbc6309c902be3caa325b6e67..0013dc87940bf17c77a1a0c083b60c5c2f6a0a57 100644 (file)
@@ -23,7 +23,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct neigh_table *tbl, str
 
        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
-       hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+       hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
        for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
             n != NULL;
             n = rcu_dereference_bh(n->next)) {
index 497ef6444a7a69164864f799fad7707c0e35635e..5865924d4aace7fd733e368021579099b4961982 100644 (file)
@@ -15,7 +15,6 @@
 
 
 #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
-#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
 
 struct sk_buff;
 
@@ -36,24 +35,18 @@ struct clip_vcc {
 
 
 struct atmarp_entry {
-       __be32          ip;             /* IP address */
        struct clip_vcc *vccs;          /* active VCCs; NULL if resolution is
                                           pending */
        unsigned long   expires;        /* entry expiration time */
        struct neighbour *neigh;        /* neighbour back-pointer */
 };
 
-
 #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
 
-
 struct clip_priv {
        int number;                     /* for convenience ... */
        spinlock_t xoff_lock;           /* ensures that pop is atomic (SMP) */
        struct net_device *next;        /* next CLIP interface */
 };
 
-
-extern struct neigh_table *clip_tbl_hook;
-
 #endif
index 9572cbd12a7af0b8c746f8a22ae6c0984409d84b..68f5891506925f3101389b2afed34900231291c2 100644 (file)
@@ -820,7 +820,7 @@ static inline __u8 __ctrl_size(struct l2cap_chan *chan)
                return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
 }
 
-extern int disable_ertm;
+extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
 void l2cap_cleanup_sockets(void);
index c011281d92c08d7b6d8189fc77ac887da3b620d9..ef2dd9438bb1124fe6426fea3769ec1f78429633 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
 #include <linux/caif/caif_socket.h>
 #include <linux/if.h>
 #include <linux/net.h>
@@ -104,4 +105,24 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
  */
 void caif_free_client(struct cflayer *adap_layer);
 
+/**
+ * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer
+ * @dev:               Network device to enroll.
+ * @caifdev:           Configuration information from CAIF Link Layer
+ * @link_support:      Link layer support layer
+ * @head_room:         Head room needed by link support layer
+ * @layer:             Lowest layer in CAIF stack
+ * @rcv_fun:           Receive function for CAIF stack.
+ *
+ * This function enroll a CAIF link layer into CAIF Stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
+ */
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                       struct cflayer *link_support, int head_room,
+                       struct cflayer **layer, int (**rcv_func)(
+                               struct sk_buff *, struct net_device *,
+                               struct packet_type *, struct net_device *));
+
 #endif /* CAIF_DEV_H_ */
index 35bc7883cf9720e8e4dcbd85c7c7a16890d179fe..0f3a39125f909f3e0b52c1addff4cd95d07b5b91 100644 (file)
@@ -121,9 +121,7 @@ enum caif_direction {
  * @transmit:  Packet transmit funciton.
  * @ctrlcmd:   Used for control signalling upwards in the stack.
  * @modemcmd:  Used for control signaling downwards in the stack.
- * @prio:      Priority of this layer.
  * @id:                The identity of this layer
- * @type:      The type of this layer
  * @name:      Name of the layer.
  *
  *  This structure defines the layered structure in CAIF.
@@ -230,9 +228,7 @@ struct cflayer {
         */
        int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
 
-       unsigned short prio;
        unsigned int id;
-       unsigned int type;
        char name[CAIF_LAYER_NAME_SZ];
 };
 
index 87c3d11b8e555ff2ebd9a07f504cd8ab2b5fcebd..aa6a485b054512cc622148f81efdfea1cf43109e 100644 (file)
@@ -55,8 +55,8 @@
 struct cfspi_xfer {
        u16 tx_dma_len;
        u16 rx_dma_len;
-       void *va_tx;
-       dma_addr_t pa_tx;
+       void *va_tx[2];
+       dma_addr_t pa_tx[2];
        void *va_rx;
        dma_addr_t pa_rx;
 };
index 3e93a4a4b677dfc811a9d91d954bf290be8e0964..90b4ff8bad838819d62d5a230bd970090b0e8808 100644 (file)
 
 struct cfcnfg;
 
-/**
- * enum cfcnfg_phy_type -  Types of physical layers defined in CAIF Stack
- *
- * @CFPHYTYPE_FRAG:    Fragmented frames physical interface.
- * @CFPHYTYPE_CAIF:    Generic CAIF physical interface
- */
-enum cfcnfg_phy_type {
-       CFPHYTYPE_FRAG = 1,
-       CFPHYTYPE_CAIF,
-       CFPHYTYPE_MAX
-};
-
 /**
  * enum cfcnfg_phy_preference - Physical preference HW Abstraction
  *
@@ -66,21 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg);
  * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
  * @cnfg:      Pointer to a CAIF configuration object, created by
  *             cfcnfg_create().
- * @phy_type:  Specifies the type of physical interface, e.g.
- *                     CFPHYTYPE_FRAG.
  * @dev:       Pointer to link layer device
  * @phy_layer: Specify the physical layer. The transmit function
  *             MUST be set in the structure.
  * @pref:      The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
  * @fcs:       Specify if checksum is used in CAIF Framing Layer.
- * @stx:       Specify if Start Of Frame eXtention is used.
+ * @head_room: Head space needed by link specific protocol.
  */
-
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
-                    bool fcs, bool stx);
+                    struct cflayer *link_support,
+                    bool fcs, int head_room);
 
 /**
  * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack.
index b8374321b362f0726ed3a3d0bfb1c95210cad620..f121299a34272f014c61b3f3996964b313b9bde3 100644 (file)
@@ -8,5 +8,5 @@
 #define CFSERL_H_
 #include <net/caif/caif_layer.h>
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx);
-#endif                         /* CFSERL_H_ */
+struct cflayer *cfserl_create(int instance, bool use_stx);
+#endif
index 839f768f9e35c6ac175ff03d42b694839f7fd49a..7828ebf99ee132241b76e500681a43188143da99 100644 (file)
 #ifndef __LINUX_NET_DSA_H
 #define __LINUX_NET_DSA_H
 
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
 #define DSA_MAX_SWITCHES       4
 #define DSA_MAX_PORTS          12
 
@@ -54,8 +59,143 @@ struct dsa_platform_data {
        struct dsa_chip_data    *chip;
 };
 
-extern bool dsa_uses_dsa_tags(void *dsa_ptr);
-extern bool dsa_uses_trailer_tags(void *dsa_ptr);
+struct dsa_switch_tree {
+       /*
+        * Configuration data for the platform device that owns
+        * this dsa switch tree instance.
+        */
+       struct dsa_platform_data        *pd;
+
+       /*
+        * Reference to network device to use, and which tagging
+        * protocol to use.
+        */
+       struct net_device       *master_netdev;
+       __be16                  tag_protocol;
+
+       /*
+        * The switch and port to which the CPU is attached.
+        */
+       s8                      cpu_switch;
+       s8                      cpu_port;
+
+       /*
+        * Link state polling.
+        */
+       int                     link_poll_needed;
+       struct work_struct      link_poll_work;
+       struct timer_list       link_poll_timer;
+
+       /*
+        * Data for the individual switch chips.
+        */
+       struct dsa_switch       *ds[DSA_MAX_SWITCHES];
+};
+
+struct dsa_switch {
+       /*
+        * Parent switch tree, and switch index.
+        */
+       struct dsa_switch_tree  *dst;
+       int                     index;
+
+       /*
+        * Configuration data for this switch.
+        */
+       struct dsa_chip_data    *pd;
+
+       /*
+        * The used switch driver.
+        */
+       struct dsa_switch_driver        *drv;
+
+       /*
+        * Reference to mii bus to use.
+        */
+       struct mii_bus          *master_mii_bus;
+
+       /*
+        * Slave mii_bus and devices for the individual ports.
+        */
+       u32                     dsa_port_mask;
+       u32                     phys_port_mask;
+       struct mii_bus          *slave_mii_bus;
+       struct net_device       *ports[DSA_MAX_PORTS];
+};
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+       return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+}
+
+static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+{
+       struct dsa_switch_tree *dst = ds->dst;
+
+       /*
+        * If this is the root switch (i.e. the switch that connects
+        * to the CPU), return the cpu port number on this switch.
+        * Else return the (DSA) port number that connects to the
+        * switch that is one hop closer to the cpu.
+        */
+       if (dst->cpu_switch == ds->index)
+               return dst->cpu_port;
+       else
+               return ds->pd->rtable[dst->cpu_switch];
+}
+
+struct dsa_switch_driver {
+       struct list_head        list;
+
+       __be16                  tag_protocol;
+       int                     priv_size;
+
+       /*
+        * Probing and setup.
+        */
+       char    *(*probe)(struct mii_bus *bus, int sw_addr);
+       int     (*setup)(struct dsa_switch *ds);
+       int     (*set_addr)(struct dsa_switch *ds, u8 *addr);
+
+       /*
+        * Access to the switch's PHY registers.
+        */
+       int     (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+       int     (*phy_write)(struct dsa_switch *ds, int port,
+                            int regnum, u16 val);
+
+       /*
+        * Link state polling and IRQ handling.
+        */
+       void    (*poll_link)(struct dsa_switch *ds);
+
+       /*
+        * ethtool hardware statistics.
+        */
+       void    (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+       void    (*get_ethtool_stats)(struct dsa_switch *ds,
+                                    int port, uint64_t *data);
+       int     (*get_sset_count)(struct dsa_switch *ds);
+};
+
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
+
+/*
+ * The original DSA tag format and some other tag formats have no
+ * ethertype, which means that we need to add a little hack to the
+ * networking receive path to make sure that received frames get
+ * the right ->protocol assigned to them when one of those tag
+ * formats is in use.
+ */
+static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
+{
+       return !!(dst->tag_protocol == htons(ETH_P_DSA));
+}
 
+static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+{
+       return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+}
 
 #endif
index 4fb6c43817918992f8334c49022d5184ea45c8e4..344c8dd028745857bdbe43efd85f386c0a64eade 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
@@ -86,12 +87,12 @@ struct dst_entry {
        };
 };
 
-static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
 {
        return rcu_dereference(dst->_neighbour);
 }
 
-static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
 {
        return rcu_dereference_raw(dst->_neighbour);
 }
@@ -205,12 +206,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
 
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
-       u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       if (!mtu)
-               mtu = dst->ops->default_mtu(dst);
-
-       return mtu;
+       return dst->ops->mtu(dst);
 }
 
 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -397,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst)
                struct neighbour *n;
 
                rcu_read_lock();
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
                neigh_confirm(n);
                rcu_read_unlock();
        }
index 9adb99845a5695b8159cc6c208bc6004ebf7b25c..e1c2ee0eef47506020f743230e923d840e64e7a9 100644 (file)
@@ -17,7 +17,7 @@ struct dst_ops {
        int                     (*gc)(struct dst_ops *ops);
        struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
        unsigned int            (*default_advmss)(const struct dst_entry *);
-       unsigned int            (*default_mtu)(const struct dst_entry *);
+       unsigned int            (*mtu)(const struct dst_entry *);
        u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
index a09447749e2d59a467c51cde514b35cef79c9b1b..da1f064a81b3744688545acef85fcaf507fe6c82 100644 (file)
@@ -59,8 +59,11 @@ struct flowi4 {
 #define flowi4_proto           __fl_common.flowic_proto
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
-       __be32                  daddr;
+
+       /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
+       __be32                  daddr;
+
        union flowi_uli         uli;
 #define fl4_sport              uli.ports.sport
 #define fl4_dport              uli.ports.dport
@@ -207,6 +210,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644 (file)
index 0000000..80461c1
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _NET_FLOW_KEYS_H
+#define _NET_FLOW_KEYS_H
+
+struct flow_keys {
+       /* (src,dst) must be grouped, in the same way than in IP header */
+       __be32 src;
+       __be32 dst;
+       union {
+               __be32 ports;
+               __be16 port16[2];
+       };
+       u8 ip_proto;
+};
+
+extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+#endif
index 82d8d09faa44d4adeb108e127d2a22696a761b83..7db32995ccd34b6aa8298f79f0bbc42ae4ebc5bb 100644 (file)
@@ -128,6 +128,8 @@ extern int genl_register_mc_group(struct genl_family *family,
                                  struct genl_multicast_group *grp);
 extern void genl_unregister_mc_group(struct genl_family *family,
                                     struct genl_multicast_group *grp);
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+                       u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 /**
  * genlmsg_put - Add generic netlink header to netlink message
index f0698b955b73c7ab41051fdf6508f4ab827932c0..75d615649071e39688b7f12a0e8a9c4066b8b31d 100644 (file)
@@ -31,8 +31,8 @@ struct icmp_err {
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMP_INC_STATS_BH(net, field)  SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
index d52685defb11e8e5fa7f277af4bb20d6dd1c9dc1..ee59f8b188ddfb081c948b27f54ac73839a9d2ef 100644 (file)
  * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
  * Maxim Osipov <maxim.osipov@siemens.com>
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
 #ifndef NET_IEEE802154_H
 #define NET_IEEE802154_H
 
+#define IEEE802154_MTU                 127
+
 #define IEEE802154_FC_TYPE_BEACON      0x0     /* Frame is beacon */
 #define        IEEE802154_FC_TYPE_DATA         0x1     /* Frame is data */
 #define IEEE802154_FC_TYPE_ACK         0x2     /* Frame is acknowledgment */
@@ -56,6 +59,9 @@
        (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
 
 
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE    2 /* 2 octets */
+
 /* MAC's Command Frames Identifiers */
 #define IEEE802154_CMD_ASSOCIATION_REQ         0x01
 #define IEEE802154_CMD_ASSOCIATION_RESP                0x02
index e46674d5daea2c8f08c708c19e1211ef3b71d0b2..00cbb4384c795b470777bd501cb65c914df50f82 100644 (file)
@@ -15,7 +15,7 @@
 #define _INET6_HASHTABLES_H
 
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/types.h>
@@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo
                                 const struct in6_addr *saddr, const __be16 sport,
                                 const struct in6_addr *daddr, const __be16 dport,
                                 const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
index e6db62e756dc57b14db644c4c220aedb34f09051..dbf9aab34c82747d0c62f76e7f0d569eb14a0e48 100644 (file)
@@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
        return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone(struct sock *sk,
-                                  const struct request_sock *req,
-                                  const gfp_t priority);
+extern struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                       const struct request_sock *req,
+                                       const gfp_t priority);
 
 enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED  = 1,
index b897d6e6d0a5a35f6a19b5403bbd7b5cefa47a39..e3e405106afea5ac34eb5d84e0d92f192e810c31 100644 (file)
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
        __be32          faddr;
+       __be32          nexthop;
        unsigned char   optlen;
        unsigned char   srr;
        unsigned char   rr;
@@ -69,7 +71,7 @@ struct ip_options_data {
 
 struct inet_request_sock {
        struct request_sock     req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u16                     inet6_rsk_offset;
 #endif
        __be16                  loc_port;
@@ -137,7 +139,7 @@ struct rtable;
 struct inet_sock {
        /* sk and pinet6 has to be the first two members of inet_sock */
        struct sock             sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6_pinfo       *pinet6;
 #endif
        /* Socket demultiplex comparisons on incoming packets. */
@@ -186,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
        memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
               sk_from->sk_prot->obj_size - ancestor_size);
 }
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
 static inline void inet_sk_copy_descendant(struct sock *sk_to,
                                           const struct sock *sk_from)
 {
index e8c25b98120523711f115867b247995e1db80a57..ba52c830a7a54569795dcd30a32813e1dc580a75 100644 (file)
@@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
-#ifdef CONFIG_NET_NS
-       return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
-                                                 /* reference counting, */
-                                                 /* initialization, or RCU. */
-#else
-       return &init_net;
-#endif
+       return read_pnet(&twsk->tw_net);
 }
 
 static inline
 void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       rcu_assign_pointer(twsk->tw_net, net);
-#endif
+       write_pnet(&twsk->tw_net, net);
 }
 #endif /* _INET_TIMEWAIT_SOCK_ */
index 78c83e62218fbfff60506e4323f72343aed2b83a..06b795dd5906720886376b6e355dd13027ee6749 100644 (file)
@@ -35,6 +35,7 @@ struct inet_peer {
 
        u32                     metrics[RTAX_MAX];
        u32                     rate_tokens;    /* rate limiting for ICMP */
+       int                     redirect_genid;
        unsigned long           rate_last;
        unsigned long           pmtu_expires;
        u32                     pmtu_orig;
@@ -86,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
 {
        struct inetpeer_addr daddr;
 
-       ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
+       *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
        daddr.family = AF_INET6;
        return inet_getpeer(&daddr, create);
 }
index eca0ef7a495e9d1605b7705e73ee3377bd8aaae5..775009f9eaba4870b8bcd3b83a4eb67af889b772 100644 (file)
@@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast,
                memcpy(buf, &naddr, sizeof(naddr));
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
 static __inline__ void inet_reset_saddr(struct sock *sk)
 {
        inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk)
        switch (sk->sk_family) {
        case AF_INET:
                return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return inet6_sk(sk)->mc_loop;
 #endif
@@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
  *     Functions provided by ip_sockglue.c
  */
 
-extern int     ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+extern void    ipv4_pktinfo_prepare(struct sk_buff *skb);
 extern void    ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
 extern int     ip_cmsg_send(struct net *net,
                             struct msghdr *msg, struct ipcm_cookie *ipc);
index 5735a0f979c379a769c980fdc302ac73825f0d88..b26bb810198169c9b0d10462c282d284a6ad1bca 100644 (file)
@@ -86,9 +86,6 @@ struct fib6_table;
 struct rt6_info {
        struct dst_entry                dst;
 
-#define rt6i_dev                       dst.dev
-#define rt6i_expires                   dst.expires
-
        /*
         * Tail elements of dst_entry (__refcnt etc.)
         * and these elements (rarely used in hot path) are in
@@ -202,6 +199,10 @@ struct fib6_node           *fib6_locate(struct fib6_node *root,
                                             const struct in6_addr *daddr, int dst_len,
                                             const struct in6_addr *saddr, int src_len);
 
+extern void                    fib6_clean_all_ro(struct net *net,
+                                              int (*func)(struct rt6_info *, void *arg),
+                                              int prune, void *arg);
+
 extern void                    fib6_clean_all(struct net *net,
                                               int (*func)(struct rt6_info *, void *arg),
                                               int prune, void *arg);
index 5e91b72fc7183092bbba1ba367206b2cff1cf2b1..2ad92ca4e6f31b08bf08ba28bfe1ddda77b518ca 100644 (file)
@@ -70,6 +70,8 @@ extern void                   ip6_route_input(struct sk_buff *skb);
 extern struct dst_entry *      ip6_route_output(struct net *net,
                                                 const struct sock *sk,
                                                 struct flowi6 *fl6);
+extern struct dst_entry *      ip6_route_lookup(struct net *net,
+                                                struct flowi6 *fl6, int flags);
 
 extern int                     ip6_route_init(void);
 extern void                    ip6_route_cleanup(void);
@@ -95,14 +97,14 @@ extern struct rt6_info              *rt6_lookup(struct net *net,
 
 extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                         struct neighbour *neigh,
-                                        const struct in6_addr *addr);
+                                        struct flowi6 *fl6);
 extern int icmp6_dst_gc(void);
 
 extern void fib6_force_start_gc(struct net *net);
 
 extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                           const struct in6_addr *addr,
-                                          int anycast);
+                                          bool anycast);
 
 extern int                     ip6_dst_hoplimit(struct dst_entry *dst);
 
index 873d5be7926c1bef17c83ea7c79eecafede9149e..ebe517f2da9fb31a7e26322d69944480d51a6b31 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/netfilter.h>           /* for union nf_inet_addr */
 #include <linux/ip.h>
 #include <linux/ipv6.h>                        /* for struct ipv6hdr */
-#include <net/ipv6.h>                  /* for ipv6_addr_copy */
+#include <net/ipv6.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
                const struct ipv6hdr *iph = nh;
                iphdr->len = sizeof(struct ipv6hdr);
                iphdr->protocol = iph->nexthdr;
-               ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
-               ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+               iphdr->saddr.in6 = iph->saddr;
+               iphdr->daddr.in6 = iph->daddr;
        } else
 #endif
        {
@@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
 {
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
-               ipv6_addr_copy(&dst->in6, &src->in6);
+               dst->in6 = src->in6;
        else
 #endif
        dst->ip = src->ip;
@@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
                __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
-               __u16 protocol, __u32 fwmark);
+               __u16 protocol, __u32 fwmark, __u32 flags);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
index a366a8a1fe2380e92ce604a21d44e8e598e484f4..e4170a22fc6f12325d6757f07cf7430d1fbcbff3 100644 (file)
@@ -132,6 +132,15 @@ extern struct ctl_path net_ipv6_ctl_path[];
        SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
 })
 
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)              \
+({                                                                     \
+       struct inet6_dev *_idev = (idev);                               \
+       if (likely(_idev != NULL))                                      \
+               SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
 #define _DEVADD(net, statname, modifier, idev, field, val)             \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
@@ -168,11 +177,11 @@ extern struct ctl_path net_ipv6_ctl_path[];
                _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)                \
-       _DEVINCATOMIC(net, icmpv6msg, , idev, field +256)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)     \
-       _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGIN_INC_STATS_BH(net, idev, field)      \
-       _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
        struct ip6_ra_chain     *next;
@@ -300,11 +309,6 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
 }
 
-static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
-{
-       memcpy(a1, a2, sizeof(struct in6_addr));
-}
-
 static inline void ipv6_addr_prefix(struct in6_addr *pfx, 
                                    const struct in6_addr *addr,
                                    int plen)
@@ -554,7 +558,7 @@ extern void                 ipv6_push_frag_opts(struct sk_buff *skb,
                                                    u8 *proto);
 
 extern int                     ipv6_skip_exthdr(const struct sk_buff *, int start,
-                                                u8 *nexthdrp);
+                                                u8 *nexthdrp, __be16 *frag_offp);
 
 extern int                     ipv6_ext_hdr(u8 nexthdr);
 
index f2419cf44cefd96d899b0f18e0b9f33c582d04d0..0954ec9591594c9c5071ac11d07c6329da32952c 100644 (file)
@@ -27,7 +27,6 @@ enum {
        IUCV_OPEN,
        IUCV_BOUND,
        IUCV_LISTEN,
-       IUCV_SEVERED,
        IUCV_DISCONN,
        IUCV_CLOSING,
        IUCV_CLOSED
@@ -146,7 +145,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int  iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
 void iucv_accept_unlink(struct sock *sk);
 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
index 62beeb97c4b16ac569165882440b820edf61ca29..e3133c23980efbfccb7da335305815416e7d8ec0 100644 (file)
@@ -79,6 +79,42 @@ struct nd_opt_hdr {
        __u8            nd_opt_len;
 } __packed;
 
+static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
+{
+       const u32 *p32 = pkey;
+
+       return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+               (p32[1] * hash_rnd[1]) +
+               (p32[2] * hash_rnd[2]) +
+               (p32[3] * hash_rnd[3]));
+}
+
+static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey)
+{
+       struct neigh_hash_table *nht;
+       const u32 *p32 = pkey;
+       struct neighbour *n;
+       u32 hash_val;
+
+       rcu_read_lock_bh();
+       nht = rcu_dereference_bh(tbl->nht);
+       hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+            n != NULL;
+            n = rcu_dereference_bh(n->next)) {
+               u32 *n32 = (u32 *) n->primary_key;
+               if (n->dev == dev &&
+                   ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+                    (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) {
+                       if (!atomic_inc_not_zero(&n->refcnt))
+                               n = NULL;
+                       break;
+               }
+       }
+       rcu_read_unlock_bh();
+
+       return n;
+}
 
 extern int                     ndisc_init(void);
 
@@ -145,13 +181,4 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
 extern void                    inet6_ifinfo_notify(int event,
                                                    struct inet6_dev *idev);
 
-static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
-{
-
-       if (dev)
-               return __neigh_lookup_errno(&nd_tbl, addr, dev);
-
-       return ERR_PTR(-ENODEV);
-}
-
 #endif
index 2720884287c3e72e34786a58522d0ee020957c87..34c996f4618139329c98b272de08b94328abab38 100644 (file)
@@ -59,7 +59,7 @@ struct neigh_parms {
        int     reachable_time;
        int     delay_probe_time;
 
-       int     queue_len;
+       int     queue_len_bytes;
        int     ucast_probes;
        int     app_probes;
        int     mcast_probes;
@@ -99,6 +99,7 @@ struct neighbour {
        rwlock_t                lock;
        atomic_t                refcnt;
        struct sk_buff_head     arp_queue;
+       unsigned int            arp_queue_len_bytes;
        struct timer_list       timer;
        unsigned long           used;
        atomic_t                probes;
@@ -138,10 +139,12 @@ struct pneigh_entry {
  *     neighbour table manipulation
  */
 
+#define NEIGH_NUM_HASH_RND     4
+
 struct neigh_hash_table {
        struct neighbour __rcu  **hash_buckets;
        unsigned int            hash_shift;
-       __u32                   hash_rnd;
+       __u32                   hash_rnd[NEIGH_NUM_HASH_RND];
        struct rcu_head         rcu;
 };
 
@@ -153,7 +156,7 @@ struct neigh_table {
        int                     key_len;
        __u32                   (*hash)(const void *pkey,
                                        const struct net_device *dev,
-                                       __u32 hash_rnd);
+                                       __u32 *hash_rnd);
        int                     (*constructor)(struct neighbour *);
        int                     (*pconstructor)(struct pneigh_entry *);
        void                    (*pdestructor)(struct pneigh_entry *);
@@ -172,12 +175,18 @@ struct neigh_table {
        atomic_t                entries;
        rwlock_t                lock;
        unsigned long           last_rand;
-       struct kmem_cache       *kmem_cachep;
        struct neigh_statistics __percpu *stats;
        struct neigh_hash_table __rcu *nht;
        struct pneigh_entry     **phash_buckets;
 };
 
+#define NEIGH_PRIV_ALIGN       sizeof(long long)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+       return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN);
+}
+
 /* flags for neigh_update() */
 #define NEIGH_UPDATE_F_OVERRIDE                        0x00000001
 #define NEIGH_UPDATE_F_WEAK_OVERRIDE           0x00000002
index 3bb6fa0eace013fd734ee7c39261a09c7910bb76..ee547c1498108ad17e25f46a339ff8eb5e49e663 100644 (file)
@@ -77,7 +77,7 @@ struct net {
        struct netns_packet     packet;
        struct netns_unix       unx;
        struct netns_ipv4       ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
index 4e9c63a20db22dd498933745ed9cf2627e74fd47..463ae8e166965908d2fc0a60e92296123b846f2e 100644 (file)
@@ -15,8 +15,8 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 
 struct nf_conn_counter {
-       u_int64_t packets;
-       u_int64_t bytes;
+       atomic64_t packets;
+       atomic64_t bytes;
 };
 
 static inline
index 4283508b3e185882bff18fe267f205df4eda39cc..a88fb6939387f228ac5826949f68151e0fceaf16 100644 (file)
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
 
 extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (nf_conntrack_event_cb == NULL)
+       if (net->ct.nf_conntrack_event_cb == NULL)
                return;
 
        e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                              int report)
 {
        int ret = 0;
+       struct net *net = nf_ct_net(ct);
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                          u32 pid,
                          int report)
 {
+       struct net *net = nf_ct_exp_net(exp);
        struct nf_exp_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_expect_event_cb);
+       notify = rcu_dereference(net->ct.nf_expect_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
index 0f8a8c587532f78ac5f191d93a1088b03ba1fd78..4619caadd9d1055fd5caf3e7a07a10eb0e263d1b 100644 (file)
@@ -91,7 +91,6 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
 
 void nf_ct_remove_expectations(struct nf_conn *ct);
 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
-void nf_ct_remove_userspace_expectations(void);
 
 /* Allocate space for an expectation: this is mandatory before calling
    nf_ct_expect_related.  You will have to call put afterwards. */
index 2f8fb77bfdd1f7d9583e42ddbee7cf0240228e17..aea3f8221be08b2208586900447f363af796440b 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
 #include <linux/list_nulls.h>
 
 /* A `tuple' is a structure containing the information to uniquely
index b8872df7285f064e95679c64da141356f84de590..b4de990b55f123e7f2c9ad1095634743e2ad4698 100644 (file)
@@ -1,14 +1,12 @@
 #ifndef _NF_NAT_H
 #define _NF_NAT_H
 #include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
+#include <linux/netfilter/nf_nat.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
-
 enum nf_nat_manip_type {
-       IP_NAT_MANIP_SRC,
-       IP_NAT_MANIP_DST
+       NF_NAT_MANIP_SRC,
+       NF_NAT_MANIP_DST
 };
 
 /* SRC manip occurs POST_ROUTING or LOCAL_IN */
@@ -52,7 +50,7 @@ struct nf_conn_nat {
 
 /* Set up the info structure to map into this range. */
 extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-                                     const struct nf_nat_range *range,
+                                     const struct nf_nat_ipv4_range *range,
                                      enum nf_nat_manip_type maniptype);
 
 /* Is this tuple already taken? (not by us)*/
index 3dc7b98effebd49e9079e25c458a479c2448f59a..b13d8d18d595b320d454b4d6b91e05d7aeea41b2 100644 (file)
@@ -20,7 +20,7 @@ extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
 {
-       if (manip == IP_NAT_MANIP_SRC)
+       if (manip == NF_NAT_MANIP_SRC)
                return ct->status & IPS_SRC_NAT_DONE;
        else
                return ct->status & IPS_DST_NAT_DONE;
index 93cc90d28e66e31047f7219e2c4a1cbd8ce6774c..7b0b51165f701a3fbb42c0d38e12d81737c2d53e 100644 (file)
@@ -4,14 +4,12 @@
 #include <net/netfilter/nf_nat.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
-struct nf_nat_range;
+struct nf_nat_ipv4_range;
 
 struct nf_nat_protocol {
        /* Protocol number. */
        unsigned int protonum;
 
-       struct module *me;
-
        /* Translate a packet to the target according to manip type.
           Return true if succeeded. */
        bool (*manip_pkt)(struct sk_buff *skb,
@@ -30,15 +28,12 @@ struct nf_nat_protocol {
           possible.  Per-protocol part of tuple is initialized to the
           incoming packet. */
        void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
-                            const struct nf_nat_range *range,
+                            const struct nf_nat_ipv4_range *range,
                             enum nf_nat_manip_type maniptype,
                             const struct nf_conn *ct);
 
-       int (*range_to_nlattr)(struct sk_buff *skb,
-                              const struct nf_nat_range *range);
-
        int (*nlattr_to_range)(struct nlattr *tb[],
-                              struct nf_nat_range *range);
+                              struct nf_nat_ipv4_range *range);
 };
 
 /* Protocol registration. */
@@ -61,14 +56,12 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
                                  const union nf_conntrack_man_proto *max);
 
 extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                     const struct nf_nat_range *range,
+                                     const struct nf_nat_ipv4_range *range,
                                      enum nf_nat_manip_type maniptype,
                                      const struct nf_conn *ct,
                                      u_int16_t *rover);
 
-extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
-                                       const struct nf_nat_range *range);
 extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                       struct nf_nat_range *range);
+                                       struct nf_nat_ipv4_range *range);
 
 #endif /*_NF_NAT_PROTO_H*/
index e505358d89993c44720529a9890162a25518248f..75ca9291cf2ce307a8e54dec8a559a65069945f9 100644 (file)
@@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
        return sk;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct sock *
 nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
index 0249399e51a773608814e6cabaeb36915d7c6533..7a911eca0f18b4a751b3410b75223080d5da4dbe 100644 (file)
@@ -18,6 +18,8 @@ struct netns_ct {
        struct hlist_nulls_head unconfirmed;
        struct hlist_nulls_head dying;
        struct ip_conntrack_stat __percpu *stat;
+       struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+       struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
        int                     sysctl_events;
        unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_acct;
index d786b4fc02a40c0bb0b7e8beeec4b06f4c56bc54..bbd023a1c9b9affa8662f0b711ea1ff4203f5c9f 100644 (file)
@@ -55,6 +55,7 @@ struct netns_ipv4 {
        int current_rt_cache_rebuild_count;
 
        unsigned int sysctl_ping_group_range[2];
+       long sysctl_tcp_mem[3];
 
        atomic_t rt_genid;
        atomic_t dev_addr_genid;
index 0b44112e2366e535a8d6ab7a8fcc726f4aad8de6..d542a4b28ccab0bb92e391b971994b37e0b5077a 100644 (file)
@@ -10,15 +10,15 @@ struct netns_mib {
        DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
        DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
-       DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+       DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct proc_dir_entry *proc_net_devsnmp6;
        DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
        DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
        DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
-       DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics);
+       DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
 #endif
 #ifdef CONFIG_XFRM_STATISTICS
        DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
index 748f91f87cd573783efcf59a615292a86b2c289e..5299e69a32afd91a6bd577ee2f7427d8c37bdf3b 100644 (file)
@@ -56,7 +56,7 @@ struct netns_xfrm {
 #endif
 
        struct dst_ops          xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
 };
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644 (file)
index 0000000..e503b87
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * netprio_cgroup.h                    Control Group Priority set
+ *
+ *
+ * Authors:    Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+
+struct netprio_map {
+       struct rcu_head rcu;
+       u32 priomap_len;
+       u32 priomap[];
+};
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup_netprio_state {
+       struct cgroup_subsys_state css;
+       u32 prioidx;
+};
+
+#ifndef CONFIG_NETPRIO_CGROUP
+extern int net_prio_subsys_id;
+#endif
+
+extern void sock_update_netprioidx(struct sock *sk);
+
+static inline struct cgroup_netprio_state
+               *task_netprio_state(struct task_struct *p)
+{
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+       return container_of(task_subsys_state(p, net_prio_subsys_id),
+                           struct cgroup_netprio_state, css);
+#else
+       return NULL;
+#endif
+}
+
+#else
+
+#define sock_update_netprioidx(sk)
+#endif
+
+#endif  /* _NETPRIO_CGROUP_H */
index 6f7eb800974af3fae4c12a7d054d8e61d9e2cf9f..875f4895b0332cb95678eb1278fc9dd4a2ba5fe2 100644 (file)
@@ -25,7 +25,7 @@
 #define _PROTOCOL_H
 
 #include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -38,7 +38,7 @@ struct net_protocol {
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff         *(*gso_segment)(struct sk_buff *skb,
-                                              u32 features);
+                                              netdev_features_t features);
        struct sk_buff        **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
@@ -46,7 +46,7 @@ struct net_protocol {
                                netns_ok:1;
 };
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
        int     (*handler)(struct sk_buff *skb);
 
@@ -57,7 +57,7 @@ struct inet6_protocol {
 
        int     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-                                      u32 features);
+                                      netdev_features_t features);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int     (*gro_complete)(struct sk_buff *skb);
@@ -91,7 +91,7 @@ struct inet_protosw {
 
 extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
@@ -100,7 +100,7 @@ extern int  inet_del_protocol(const struct net_protocol *prot, unsigned char num)
 extern void    inet_register_protosw(struct inet_protosw *p);
 extern void    inet_unregister_protosw(struct inet_protosw *p);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern int     inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_register_protosw(struct inet_protosw *p);
index 3319f16b3beb899727c7a434e75fb1010d7ee139..ef715a16cce45672474eee575d71dd0ff7b325d4 100644 (file)
@@ -5,6 +5,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
 
 /*     Random Early Detection (RED) algorithm.
        =======================================
        etc.
  */
 
+/*
+ * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
+ *
+ * Every 500 ms:
+ *  if (avg > target and max_p <= 0.5)
+ *   increase max_p : max_p += alpha;
+ *  else if (avg < target and max_p >= 0.01)
+ *   decrease max_p : max_p *= beta;
+ *
+ * target :[qth_min + 0.4*(qth_max - qth_min),
+ *          qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (with 32 bits mantissa)
+ * max_P between 0.01 and 0.5 (1% - 50%) [ It's no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
 #define RED_STAB_SIZE  256
 #define RED_STAB_MASK  (RED_STAB_SIZE - 1)
 
@@ -101,10 +125,14 @@ struct red_stats {
 
 struct red_parms {
        /* Parameters */
-       u32             qth_min;        /* Min avg length threshold: A scaled */
-       u32             qth_max;        /* Max avg length threshold: A scaled */
+       u32             qth_min;        /* Min avg length threshold: Wlog scaled */
+       u32             qth_max;        /* Max avg length threshold: Wlog scaled */
        u32             Scell_max;
-       u32             Rmask;          /* Cached random mask, see red_rmask */
+       u32             max_P;          /* probability, [0 .. 1.0] 32 scaled */
+       u32             max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
+       u32             qth_delta;      /* max_th - min_th */
+       u32             target_min;     /* min_th + 0.4*(max_th - min_th) */
+       u32             target_max;     /* min_th + 0.6*(max_th - min_th) */
        u8              Scell_log;
        u8              Wlog;           /* log(W)               */
        u8              Plog;           /* random number bits   */
@@ -115,19 +143,23 @@ struct red_parms {
                                           number generation */
        u32             qR;             /* Cached random number */
 
-       unsigned long   qavg;           /* Average queue length: A scaled */
-       psched_time_t   qidlestart;     /* Start of current idle period */
+       unsigned long   qavg;           /* Average queue length: Wlog scaled */
+       ktime_t         qidlestart;     /* Start of current idle period */
 };
 
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
 {
-       return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+       return Plog < 32 ? (~0U >> Plog) : ~0U;
 }
 
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
-                                u8 Scell_log, u8 *stab)
+                                u8 Scell_log, u8 *stab, u32 max_P)
 {
+       int delta = qth_max - qth_min;
+       u32 max_p_delta;
+
        /* Reset average queue length, the value is strictly bound
         * to the parameters below, reseting hurts a bit but leaving
         * it might result in an unreasonable qavg for a while. --TGR
@@ -139,26 +171,45 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       p->Rmask        = red_rmask(Plog);
+       if (delta < 0)
+               delta = 1;
+       p->qth_delta    = delta;
+       if (!max_P) {
+               max_P = red_maxp(Plog);
+               max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+       }
+       p->max_P = max_P;
+       max_p_delta = max_P / delta;
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal  = reciprocal_value(max_p_delta);
+
+       /* RED Adaptive target :
+        * [min_th + 0.4*(max_th - min_th),
+        *  min_th + 0.6*(max_th - min_th)].
+        */
+       delta /= 5;
+       p->target_min = qth_min + 2*delta;
+       p->target_max = qth_min + 3*delta;
+
        p->Scell_log    = Scell_log;
        p->Scell_max    = (255 << Scell_log);
 
        memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_parms *p)
 {
-       return p->qidlestart != PSCHED_PASTPERFECT;
+       return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = psched_get_time();
+       p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = PSCHED_PASTPERFECT;
+       p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -168,15 +219,12 @@ static inline void red_restart(struct red_parms *p)
        p->qcount = -1;
 }
 
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p)
 {
-       psched_time_t now;
-       long us_idle;
+       s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+       long us_idle = min_t(s64, delta, p->Scell_max);
        int  shift;
 
-       now = psched_get_time();
-       us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
        /*
         * The problem: ideally, average length queue recalcultion should
         * be done over constant clock intervals. This is too expensive, so
@@ -218,7 +266,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
        }
 }
 
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
                                                       unsigned int backlog)
 {
        /*
@@ -233,7 +281,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
        return p->qavg + (backlog - (p->qavg >> p->Wlog));
 }
 
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
                                          unsigned int backlog)
 {
        if (!red_is_idling(p))
@@ -242,23 +290,24 @@ static inline unsigned long red_calc_qavg(struct red_parms *p,
                return red_calc_qavg_from_idle_time(p);
 }
 
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
 {
-       return net_random() & p->Rmask;
+       return reciprocal_divide(net_random(), p->max_P_reciprocal);
 }
 
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p, unsigned long qavg)
 {
        /* The formula used below causes questions.
 
-          OK. qR is random number in the interval 0..Rmask
+          OK. qR is random number in the interval
+               (0..1/max_P)*(qth_max-qth_min)
           i.e. 0..(2^Plog). If we used floating point
           arithmetics, it would be: (2^Plog)*rnd_num,
           where rnd_num is less 1.
 
           Taking into account, that qavg have fixed
-          point at Wlog, and Plog is related to max_P by
-          max_P = (qth_max-qth_min)/2^Plog; two lines
+          point at Wlog, two lines
           below have the following floating point equivalent:
 
           max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
@@ -318,4 +367,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
        return RED_DONT_MARK;
 }
 
+static inline void red_adaptative_algo(struct red_parms *p)
+{
+       unsigned long qavg;
+       u32 max_p_delta;
+
+       qavg = p->qavg;
+       if (red_is_idling(p))
+               qavg = red_calc_qavg_from_idle_time(p);
+
+       /* p->qavg is fixed point number with point at Wlog */
+       qavg >>= p->Wlog;
+
+       if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+               p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+       else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+               p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+       max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
 #endif
index db7b3432f07c41ce124c9d2a792035cead8b048d..91855d185b537f96fc0ea09134c96a93b63aa3d3 100644 (file)
@@ -71,12 +71,12 @@ struct rtable {
        struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
 {
        return rt->rt_route_iif != 0;
 }
 
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
 {
        return rt->rt_route_iif == 0;
 }
index 6a72a58cde59cd94b5f2dd8d2fdad3fd08009665..d3685615a8b0eeff1d0f7ca032fa04c3fd5000c5 100644 (file)
@@ -71,7 +71,7 @@
 #include <linux/jiffies.h>
 #include <linux/idr.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #endif
@@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; }
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 void sctp_v6_pf_init(void);
 void sctp_v6_pf_exit(void);
index e90e7a9935ddc5c70c8e920487bc1196c2cc49e2..88949a9945387c1db7fbb5cfaa825b0c95af9a68 100644 (file)
@@ -235,12 +235,15 @@ extern struct sctp_globals {
 
        /* Flag to indicate whether computing and verifying checksum
         * is disabled. */
-        int checksum_disable;
+        bool checksum_disable;
 
        /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -365,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
        return (struct sock *)sp;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct sctp6_sock {
        struct sctp_sock  sctp;
        struct ipv6_pinfo inet6;
@@ -1085,6 +1089,7 @@ void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *);
 void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
 
 
 /* This is the structure we use to queue packets as they come into
index 8f0f9ac0307ffc3ead58ce031f62d8ec8b2d1f7e..2f65e1686fc85f3b678106725053123b7b7b4747 100644 (file)
@@ -67,7 +67,7 @@ struct icmp_mib {
 
 #define ICMPMSG_MIB_MAX        __ICMPMSG_MIB_MAX
 struct icmpmsg_mib {
-       unsigned long   mibs[ICMPMSG_MIB_MAX];
+       atomic_long_t   mibs[ICMPMSG_MIB_MAX];
 };
 
 /* ICMP6 (IPv6-ICMP) */
@@ -84,7 +84,7 @@ struct icmpv6_mib_device {
 #define ICMP6MSG_MIB_MAX  __ICMP6MSG_MIB_MAX
 /* per network ns counters */
 struct icmpv6msg_mib {
-       unsigned long   mibs[ICMP6MSG_MIB_MAX];
+       atomic_long_t   mibs[ICMP6MSG_MIB_MAX];
 };
 /* per device counters, (shared on all cpus) */
 struct icmpv6msg_mib_device {
index c0c32a4cdd070418e7cd885bc69aadd33e857e49..bb972d254dff4550301b189bb487e554da02ae58 100644 (file)
@@ -53,6 +53,8 @@
 #include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+#else
+static inline
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+}
+#endif
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -167,6 +185,7 @@ struct sock_common {
        /* public: */
 };
 
+struct cg_proto;
 /**
   *    struct sock - network layer representation of sockets
   *    @__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +246,7 @@ struct sock_common {
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
   *    @sk_classid: this socket's cgroup classid
+  *    @sk_cgrp: this socket's cgroup-specific proto data
   *    @sk_write_pending: a write to stream socket waits to start
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
@@ -306,8 +326,8 @@ struct sock {
        kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
-       int                     sk_route_caps;
-       int                     sk_route_nocaps;
+       netdev_features_t       sk_route_caps;
+       netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
@@ -320,6 +340,9 @@ struct sock {
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
+#ifdef CONFIG_CGROUPS
+       __u32                   sk_cgrp_prioidx;
+#endif
        struct pid              *sk_peer_pid;
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
@@ -338,6 +361,7 @@ struct sock {
 #endif
        __u32                   sk_mark;
        u32                     sk_classid;
+       struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk, int bytes);
        void                    (*sk_write_space)(struct sock *sk);
@@ -638,12 +662,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
@@ -834,6 +860,37 @@ struct proto {
 #ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       /*
+        * cgroup specific init/deinit functions. Called once for all
+        * protocols that implement it, from cgroups populate function.
+        * This function has to setup any files the protocol want to
+        * appear in the kmem cgroup filesystem.
+        */
+       int                     (*init_cgroup)(struct cgroup *cgrp,
+                                              struct cgroup_subsys *ss);
+       void                    (*destroy_cgroup)(struct cgroup *cgrp,
+                                                 struct cgroup_subsys *ss);
+       struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+       void                    (*enter_memory_pressure)(struct sock *sk);
+       struct res_counter      *memory_allocated;      /* Current allocated memory. */
+       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
+       int                     *memory_pressure;
+       long                    *sysctl_mem;
+       /*
+        * memcg field is used to find which memcg we belong directly
+        * Each memcg struct can hold more than one cg_proto, so container_of
+        * won't really cut.
+        *
+        * The elegant solution would be having an inverse function to
+        * proto_cgroup in struct proto, but that means polluting the structure
+        * for everybody, instead of just for memcg users.
+        */
+       struct mem_cgroup       *memcg;
 };
 
 extern int proto_register(struct proto *prot, int alloc_slab);
@@ -852,7 +909,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
 
-static inline void sk_refcnt_debug_release(const struct sock *sk)
+inline void sk_refcnt_debug_release(const struct sock *sk)
 {
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -864,6 +921,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+extern struct jump_label_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+       return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+       if (!sk->sk_prot->memory_pressure)
+               return false;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return !!*sk->sk_cgrp->memory_pressure;
+
+       return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+       int *memory_pressure = sk->sk_prot->memory_pressure;
+
+       if (!memory_pressure)
+               return;
+
+       if (*memory_pressure)
+               *memory_pressure = 0;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       if (*cg_proto->memory_pressure)
+                               *cg_proto->memory_pressure = 0;
+       }
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+       if (!sk->sk_prot->enter_memory_pressure)
+               return;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       cg_proto->enter_memory_pressure(sk);
+       }
+
+       sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+       long *prot = sk->sk_prot->sysctl_mem;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               prot = sk->sk_cgrp->sysctl_mem;
+       return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+                                             unsigned long amt,
+                                             int *parent_status)
+{
+       struct res_counter *fail;
+       int ret;
+
+       ret = res_counter_charge(prot->memory_allocated,
+                                amt << PAGE_SHIFT, &fail);
+
+       if (ret < 0)
+               *parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+                                             unsigned long amt)
+{
+       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+       u64 ret;
+       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+               /* update the root cgroup regardless */
+               atomic_long_add_return(amt, prot->memory_allocated);
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+       }
+
+       return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+           parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+               memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+       atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_dec(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_inc(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+       if (!prot->memory_pressure)
+               return false;
+       return !!*prot->memory_pressure;
+}
+
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
@@ -1090,8 +1349,8 @@ extern struct sock                *sk_alloc(struct net *net, int family,
                                          struct proto *prot);
 extern void                    sk_free(struct sock *sk);
 extern void                    sk_release_kernel(struct sock *sk);
-extern struct sock             *sk_clone(const struct sock *sk,
-                                         const gfp_t priority);
+extern struct sock             *sk_clone_lock(const struct sock *sk,
+                                              const gfp_t priority);
 
 extern struct sk_buff          *sock_wmalloc(struct sock *sk,
                                              unsigned long size, int force,
@@ -1394,7 +1653,7 @@ static inline int sk_can_gso(const struct sock *sk)
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_nocaps_add(struct sock *sk, int flags)
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
        sk->sk_route_nocaps |= flags;
        sk->sk_route_caps &= ~flags;
@@ -1671,7 +1930,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 
        page = alloc_pages(sk->sk_allocation, 0);
        if (!page) {
-               sk->sk_prot->enter_memory_pressure(sk);
+               sk_enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return page;
index bb18c4d69aba1da89488af60999f03341648ee61..0118ea999f67a882f6e4e389aa9f2b685e571955 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/dst.h>
 
 #include <linux/seq_file.h>
+#include <linux/memcontrol.h>
 
 extern struct inet_hashinfo tcp_hashinfo;
 
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
        }
 
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-           atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+           sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
 }
@@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk);
 struct tcp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;       /* For incoming frames          */
@@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp)
 
 static inline int tcp_is_fack(const struct tcp_sock *tp)
 {
-       return tp->rx_opt.sack_ok & 2;
+       return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
 
 static inline void tcp_enable_fack(struct tcp_sock *tp)
 {
-       tp->rx_opt.sack_ok |= 2;
+       tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 }
 
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+       return 3;
+}
+
 /* Slow start with delack produces 3 packets of burst, so that
  * it is safe "de facto".  This will be the default - same as
  * the default reordering threshold - but if reordering increases,
@@ -1144,7 +1152,7 @@ struct tcp6_md5sig_key {
 /* - sock block */
 struct tcp_md5sig_info {
        struct tcp4_md5sig_key  *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_md5sig_key  *keys6;
        u32                     entries6;
        u32                     alloced6;
@@ -1171,7 +1179,7 @@ struct tcp6_pseudohdr {
 
 union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
 #endif
 };
@@ -1430,7 +1438,8 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+                                      netdev_features_t features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644 (file)
index 0000000..3512082
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct tcp_memcontrol {
+       struct cg_proto cg_proto;
+       /* per-cgroup tcp memory pressure knobs */
+       struct res_counter tcp_memory_allocated;
+       struct percpu_counter tcp_sockets_allocated;
+       /* those two are read-mostly, leave them at the end */
+       long tcp_prot_mem[3];
+       int tcp_memory_pressure;
+};
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
+#endif /* _TCP_MEMCG_H */
index 3b285f402f480e76aa9b69136447df9f6d66bb65..e39592f682c3fb55c42ea2f04d027bdbbde78fc5 100644 (file)
@@ -41,7 +41,7 @@
 struct udp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
@@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                                    __be32 daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                                   __be32 daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
                                    const struct in6_addr *daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+                                   const struct in6_addr *daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 
 /*
  *     SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
        else        SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define UDPX_INC_STATS_BH(sk, field) \
        do { \
                if ((sk)->sk_family == AF_INET) \
@@ -258,5 +264,6 @@ extern void udp4_proc_exit(void);
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features);
 #endif /* _UDP_H */
index b203e14d26b7f59a5f884b87183d3ecb74447d2a..89174e29dca99bf943aed6d3ed6544fd040f002e 100644 (file)
@@ -827,6 +827,14 @@ static inline bool addr_match(const void *token1, const void *token2,
        return true;
 }
 
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+       /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+       if (prefixlen == 0)
+               return true;
+       return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+}
+
 static __inline__
 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
 {
@@ -1209,8 +1217,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
                memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
                break;
        case AF_INET6:
-               ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
-               ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
+               *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
+               *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
                break;
        }
 }
index d1e95c6ac7769b2d7e328eefddf8c6f4a3077010..5a35a2a2d3c514bef92ef7bed3d32216cc08c206 100644 (file)
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
        u8 map_dest;
        u8 spma;
        u8 probe_tries;
+       u8 priority;
        u8 dest_addr[ETH_ALEN];
        u8 ctl_src_addr[ETH_ALEN];
 
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
  * @lport:                    The associated local port
  * @fcoe_pending_queue:               The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority:                 Packet priority (DCB)
  * @max_queue_depth:          Max queue depth of pending queue
  * @min_queue_depth:          Min queue depth of pending queue
  * @timer:                    The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
        struct fc_lport       *lport;
        struct sk_buff_head   fcoe_pending_queue;
        u8                    fcoe_pending_queue_active;
+       u8                    priority;
        u32                   max_queue_depth;
        u32                   min_queue_depth;
        struct timer_list     timer;
index 7f5fed3c89e1808f6f2535ea06bb20cdd772b7f6..6873c7dd9145d2a23f682d9e8d2c695a7571d656 100644 (file)
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
-       SCF_SE_CMD_FAILED               = 0x00000400,
+       SCF_FUA                         = 0x00000200,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
+       SCF_BIDI                        = 0x00002000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
+       TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
 struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
        u16     lu_gp_id;
        int     lu_gp_valid_id;
        u32     lu_gp_members;
-       atomic_t lu_gp_shutdown;
        atomic_t lu_gp_ref_cnt;
        spinlock_t lu_gp_lock;
        struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
-       /* Transport specific error status */
-       int                     transport_error_status;
        /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
-       int                     check_release:1;
-       int                     cmd_wait_set:1;
+       unsigned                check_release:1;
+       unsigned                cmd_wait_set:1;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
-       struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
        struct se_dev_entry   *se_deve;
-       struct se_device        *se_obj_ptr;
-       struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
-       int                     t_tasks_fua;
-       bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
 
        struct work_struct      work;
 
-       /*
-        * Used for pre-registered fabric SGL passthrough WRITE and READ
-        * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-        * and other HW target mode fabric modules.
-        */
-       struct scatterlist      *t_task_pt_sgl;
-       u32                     t_task_pt_sgl_num;
-
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       int                     sess_tearing_down:1;
+       unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
        struct t10_reservation t10_pr;
        spinlock_t      se_dev_lock;
        void            *se_dev_su_ptr;
-       struct list_head se_dev_node;
        struct config_group se_dev_group;
        /* For T10 Reservations */
        struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
 } ____cacheline_aligned;
 
 struct se_device {
-       /* Set to 1 if thread is NOT sleeping on thread_sem */
-       u8                      thread_active;
-       u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
-       atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
-       atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
-       atomic_t                dev_status_thr_count;
-       atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
-       spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
-       spinlock_t              state_task_lock;
-       spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
-       spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
-       struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
-       pid_t                   process_thread_pid;
-       struct task_struct              *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
-       struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-       /* Linked list for struct se_global->g_se_dev_list */
-       struct list_head        g_se_dev_list;
 }  ____cacheline_aligned;
 
 struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
index c16e9431dd01bb40d748fc315d04d103d77a351d..dac4f2d859fd72734fbc54ffda839216fe554079 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT                0
-#define PYX_TRANSPORT_WRITE_PENDING            1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE       -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL           -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS     -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES  -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD                -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST   -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE          -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE                -8
-#define PYX_TRANSPORT_WRITE_PROTECTED          -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT     -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST          -11
-#define PYX_TRANSPORT_USE_SENSE_REASON         -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT          0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE                  0
-#define TRANSPORT_PLUGIN_REGISTERED            1
-
 #define TRANSPORT_PLUGIN_PHBA_PDEV             1
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
index b99caa8b780c624af834caeab4130549479e3844..99d1d0decf88e41a7c0c038d463330e8351046b1 100644 (file)
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
 
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
-                 wb_reason_name[__entry->reason]
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
-               wb_reason_name[__entry->reason])
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
 );
 
 TRACE_EVENT(global_dirty_state,
index b66ebb2032c6d6b87c928876cab69c235d2425f3..378c7ed6760be0563b501971df2a60e03a672ea7 100644 (file)
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
        void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
 };
 
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
 /* Init with the board info */
 extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
-       return 0;
-}
-#endif
 
 struct omap_display_platform_data {
        struct omap_dss_board_info *board_data;
index f0b6890370be053e25feadfd7bf0eeeacb78f800..f6f07aa35af5f8b0e91347d9707ffd08eccd577f 100644 (file)
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
     XS_IS_DOMAIN_INTRODUCED,
     XS_RESUME,
     XS_SET_TARGET,
-    XS_RESTRICT,
-    XS_RESET_WATCHES
+    XS_RESTRICT
 };
 
 #define XS_WRITE_NONE "NONE"
index a785a3b0c8c7d89d9a46e1a1d90300cb91da5308..438c256c274b3b7984155399f87e26cd13d27cd7 100644 (file)
@@ -29,8 +29,7 @@
 static inline int xen_must_unplug_nics(void) {
 #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
                defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \
-               (defined(CONFIG_XEN_PLATFORM_PCI) || \
-                defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
+               defined(CONFIG_XEN_PVHVM)
         return 1;
 #else
         return 0;
@@ -40,8 +39,7 @@ static inline int xen_must_unplug_nics(void) {
 static inline int xen_must_unplug_disks(void) {
 #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \
                defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \
-               (defined(CONFIG_XEN_PLATFORM_PCI) || \
-                defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
+               defined(CONFIG_XEN_PVHVM)
         return 1;
 #else
         return 0;
index 43298f9810fba87bdf0083ac1fa5401b0819c4cb..b8930d5a8325fc0557ddb8649e16d7c8d7c68f21 100644 (file)
@@ -689,6 +689,17 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
          For those who want to have the feature enabled by default should
          select this option (if, for some reason, they need to disable it
          then swapaccount=0 does the trick).
+config CGROUP_MEM_RES_CTLR_KMEM
+       bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
+       depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+       default n
+       help
+         The Kernel Memory extension for Memory Resource Controller can limit
+         the amount of memory used by kernel objects in the system. Those are
+         fundamentally different from the entities handled by the standard
+         Memory Controller, which are page-based, and can be swapped. Users of
+         the kmem extension can use it to guarantee that no group of processes
+         will ever exhaust kernel resources alone.
 
 config CGROUP_PERF
        bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
index 2e0ecfcc881dd124e3b2a2d868971fd1e6dea6f8..5b4293d9819d87b636d09d63224df43535e44fc4 100644 (file)
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
 
 void mq_put_mnt(struct ipc_namespace *ns)
 {
-       mntput(ns->mq_mnt);
+       kern_unmount(ns->mq_mnt);
 }
 
 static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
 
        spin_lock_init(&mq_lock);
 
-       init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
-       if (IS_ERR(init_ipc_ns.mq_mnt)) {
-               error = PTR_ERR(init_ipc_ns.mq_mnt);
+       error = mq_init_ns(&init_ipc_ns);
+       if (error)
                goto out_filesystem;
-       }
 
        return 0;
 
index 8b5ce5d3f3ef3e4f468d5afc4175cd22518fc029..5652101cdac03d42ca9645017cc2a4f8df6537cb 100644 (file)
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
  */
 struct ipc_namespace init_ipc_ns = {
        .count          = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
-       .mq_queues_max   = DFLT_QUEUESMAX,
-       .mq_msg_max      = DFLT_MSGMAX,
-       .mq_msgsize_max  = DFLT_MSGSIZEMAX,
-#endif
        .user_ns = &init_user_ns,
 };
 
index d9d5648f3cdcc4bcde3a7a67afe845aa89dc2a1f..a184470cf9b51c896826986deca941dd5dcf3d89 100644 (file)
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
index 5e828a2ca8e64641749da837fe093cefcfc958dc..213c0351dad8fc8315d6d79a5bf3706a59c823bd 100644 (file)
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+       return frozen(task) ||
+               (task_is_stopped_or_traced(task) && freezing(task));
+}
+
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        nfrozen++;
        }
 
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
index 9fe58c46a426de719da9636d8c74d8c9f6bb312d..0b1712dba587fdee93709798b14389f3f1d85b34 100644 (file)
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
        CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
 {
-       bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+       bool need_loop;
 
 repeat:
        /*
@@ -962,6 +975,14 @@ repeat:
                return;
 
        task_lock(tsk);
+       /*
+        * Determine if a loop is necessary if another thread is doing
+        * get_mems_allowed().  If at least one node remains unchanged and
+        * tsk does not have a mempolicy, then an empty nodemask will not be
+        * possible when mems_allowed is larger than a word.
+        */
+       need_loop = task_has_mempolicy(tsk) ||
+                       !nodes_intersects(*newmems, tsk->mems_allowed);
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side.  No wait is necessary, however, if at least one
-        * node remains unchanged.
+        * for the read-side.
         */
-       while (masks_disjoint &&
-                       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
index 0e8457da6f9551c3eae667a6ac09a735a341b4bf..58690af323e469213db42bce2c0d1a772db12519 100644 (file)
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
+       if (ctx->nr_events)
+               cpuctx->task_ctx = ctx;
 
-       cpuctx->task_ctx = ctx;
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        struct ring_buffer *rb;
        unsigned int events = POLL_HUP;
 
+       /*
+        * Race between perf_event_set_output() and perf_poll(): perf_poll()
+        * grabs the rb reference but perf_event_set_output() overrides it.
+        * Here is the timeline for two threads T1, T2:
+        * t0: T1, rb = rcu_dereference(event->rb)
+        * t1: T2, old_rb = event->rb
+        * t2: T2, event->rb = new rb
+        * t3: T2, ring_buffer_detach(old_rb)
+        * t4: T1, ring_buffer_attach(rb1)
+        * t5: T1, poll_wait(event->waitq)
+        *
+        * To avoid this problem, we grab mmap_mutex in perf_poll()
+        * thereby ensuring that the assignment of the new ring buffer
+        * and the detachment of the old buffer appear atomic to perf_poll()
+        */
+       mutex_lock(&event->mmap_mutex);
+
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (rb)
+       if (rb) {
+               ring_buffer_attach(event, rb);
                events = atomic_xchg(&rb->poll, 0);
+       }
        rcu_read_unlock();
 
+       mutex_unlock(&event->mmap_mutex);
+
        poll_wait(file, &event->waitq, wait);
 
        return events;
@@ -3496,6 +3521,53 @@ unlock:
        return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (!list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       if (!list_empty(&event->rb_entry))
+               goto unlock;
+
+       list_add(&event->rb_entry, &rb->event_list);
+unlock:
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_del_init(&event->rb_entry);
+       wake_up_all(&event->waitq);
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+               wake_up_all(&event->waitq);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
        struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+       struct perf_event *event, *n;
+       unsigned long flags;
+
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+               list_del_init(&event->rb_entry);
+               wake_up_all(&event->waitq);
+       }
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
+               ring_buffer_detach(event, rb);
                mutex_unlock(&event->mmap_mutex);
 
                ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-       wake_up_all(&event->waitq);
+       ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->rb_entry);
+
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6113,8 @@ set:
 
        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
+       if (old_rb)
+               ring_buffer_detach(event, old_rb);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
index 09097dd8116c0e0bf5120d4da26c5f539a7f600a..64568a699375f105232eb588963da8707f926295 100644 (file)
@@ -22,6 +22,9 @@ struct ring_buffer {
        local_t                         lost;           /* nr records lost   */
 
        long                            watermark;      /* wakeup watermark  */
+       /* poll crap */
+       spinlock_t                      event_lock;
+       struct list_head                event_list;
 
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
index a2a29205cc0fc10913277132162e0a515a944552..7f3011c6b57fa3288c7e4c46a555736763765a84 100644 (file)
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->writable = 1;
 
        atomic_set(&rb->refcount, 1);
+
+       INIT_LIST_HEAD(&rb->event_list);
+       spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index d0b7d988f8735beb6e1e7b5f7b42ecbd04b91bf5..e6e01b959a0ef3b3efd0fc0624a3286a57770fc8 100644 (file)
@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* dead body doesn't have much to contribute */
-       if (p->exit_state == EXIT_DEAD)
+       if (unlikely(p->exit_state == EXIT_DEAD)) {
+               /*
+                * But do not ignore this task until the tracer does
+                * wait_task_zombie()->do_notify_parent().
+                */
+               if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+                       wo->notask_error = 0;
                return 0;
+       }
 
        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
index ba0d172613296047a24918106d90eff09f2a5bb8..da4a6a10d088d2d575c4eeae9438f832efc6f41b 100644 (file)
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 
 void free_task(struct task_struct *tsk)
 {
-       prop_local_destroy_single(&tsk->dirties);
        account_kernel_stack(tsk->stack, -1);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
        tsk->stack = ti;
 
-       err = prop_local_init_single(&tsk->dirties);
-       if (err)
-               goto out;
-
        setup_thread_stack(tsk, orig);
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
index ea87f4d2f455c8c99164cb555287ad1c9fb165e3..1614be20173dcb19ef45e65fa25d07f8d3c65194 100644 (file)
@@ -314,17 +314,29 @@ again:
 #endif
 
        lock_page(page_head);
+
+       /*
+        * If page_head->mapping is NULL, then it cannot be a PageAnon
+        * page; but it might be the ZERO_PAGE or in the gate area or
+        * in a special mapping (all cases which we are happy to fail);
+        * or it may have been a good file page when get_user_pages_fast
+        * found it, but truncated or holepunched or subjected to
+        * invalidate_complete_page2 before we got the page lock (also
+        * cases which we are happy to fail).  And we hold a reference,
+        * so refcount care in invalidate_complete_page's remove_mapping
+        * prevents drop_caches from setting mapping to NULL beneath us.
+        *
+        * The case we do have to guard against is when memory pressure made
+        * shmem_writepage move it from filecache to swapcache beneath us:
+        * an unlikely race, but we do need to retry for page_head->mapping.
+        */
        if (!page_head->mapping) {
+               int shmem_swizzled = PageSwapCache(page_head);
                unlock_page(page_head);
                put_page(page_head);
-               /*
-               * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-               * trying to find one. RW mapping would have COW'd (and thus
-               * have a mapping) so this page is RO and won't ever change.
-               */
-               if ((page_head == ZERO_PAGE(address)))
-                       return -EFAULT;
-               goto again;
+               if (shmem_swizzled)
+                       goto again;
+               return -EFAULT;
        }
 
        /*
index 422e567eecf63636a7c349b6319e6d541c6f3818..ae34bf51682b4a204de93f62943055350cd5c4d0 100644 (file)
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
+       struct timerqueue_node *next_timer;
        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
                goto out;
 
-       if (&timer->node == timerqueue_getnext(&base->active)) {
+       next_timer = timerqueue_getnext(&base->active);
+       timerqueue_del(&base->active, &timer->node);
+       if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
                /* Reprogram the clock event device. if enabled */
                if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
                }
 #endif
        }
-       timerqueue_del(&base->active, &timer->node);
        if (!timerqueue_getnext(&base->active))
                base->cpu_base->active_bases &= ~(1 << base->index);
 out:
index 8b1748d0172c7e35672c98c9ab6e93b4d815f850..2e48ec0c2e91cf099642f8bf43e90476fb6457a3 100644 (file)
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        /*
         * Ensure the task is not frozen.
-        * Also, when a freshly created task is scheduled once, changes
-        * its state to TASK_UNINTERRUPTIBLE without having ever been
-        * switched out once, it musn't be checked.
+        * Also, skip vfork and any other user process that freezer should skip.
         */
-       if (unlikely(t->flags & PF_FROZEN || !switch_count))
+       if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+           return;
+
+       /*
+        * When a freshly created task is scheduled once, changes its state to
+        * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
+        * musn't be checked.
+        */
+       if (unlikely(!switch_count))
                return;
 
        if (switch_count != t->last_switch_count) {
index 67ce837ae52cdd70115a8ce436f923e96a26adc4..1da999f5e746caedacb8b40d0015e7c3f273ad04 100644 (file)
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
                return -ENOMEM;
 
        action->handler = handler;
-       action->flags = IRQF_PERCPU;
+       action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
        action->name = devname;
        action->percpu_dev_id = dev_id;
 
index aa57d5da18c1de65e807098702ad9b841fa3788d..dc813a948be2379fe20a0218e1d270212b8408d3 100644 (file)
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
-           (action->flags & __IRQF_TIMER) || !action->next)
+           (action->flags & __IRQF_TIMER) ||
+           (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+           !action->next)
                goto out;
 
        /* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
        struct irq_desc *desc;
        int i, ok = 0;
 
-       if (atomic_inc_return(&irq_poll_active) == 1)
+       if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
 
        irq_poll_cpu = smp_processor_id();
index bbdfe2a462a088b210d5792c674b215274b1b39b..66ff7109f6970ca63cb4aa9bf6b4800d69ef2f3a 100644 (file)
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
 
index e69434b070da3f922909ece9417627e11234dcd6..b2e08c932d91c6507f07d637f61f0df1205778b8 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
index b4511b6d3ef93ef5e2d12a097c3a63fa058de423..a6b0503574ee714b2cbe6c871741d298ad9ce5f7 100644 (file)
@@ -55,6 +55,8 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
+static bool freezer_test_done;
+
 static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
@@ -345,11 +347,24 @@ int hibernation_snapshot(int platform_mode)
 
        error = freeze_kernel_threads();
        if (error)
-               goto Close;
+               goto Cleanup;
+
+       if (hibernation_test(TEST_FREEZER) ||
+               hibernation_testmode(HIBERNATION_TESTPROC)) {
+
+               /*
+                * Indicate to the caller that we are returning due to a
+                * successful freezer test.
+                */
+               freezer_test_done = true;
+               goto Cleanup;
+       }
 
        error = dpm_prepare(PMSG_FREEZE);
-       if (error)
-               goto Complete_devices;
+       if (error) {
+               dpm_complete(msg);
+               goto Cleanup;
+       }
 
        suspend_console();
        pm_restrict_gfp_mask();
@@ -378,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
                pm_restore_gfp_mask();
 
        resume_console();
-
- Complete_devices:
        dpm_complete(msg);
 
  Close:
@@ -389,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
  Recover_platform:
        platform_recover(platform_mode);
        goto Resume_devices;
+
+ Cleanup:
+       swsusp_free();
+       goto Close;
 }
 
 /**
@@ -641,15 +658,13 @@ int hibernate(void)
        if (error)
                goto Finish;
 
-       if (hibernation_test(TEST_FREEZER))
-               goto Thaw;
-
-       if (hibernation_testmode(HIBERNATION_TESTPROC))
-               goto Thaw;
-
        error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
        if (error)
                goto Thaw;
+       if (freezer_test_done) {
+               freezer_test_done = false;
+               goto Thaw;
+       }
 
        if (in_suspend) {
                unsigned int flags = 0;
index 71f49fe4377e907c85c6d5712335aa56b579a79a..36e0f0903c3245e60d2994121d1aaa28c8fc0a23 100644 (file)
@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
                if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
                        break;
        }
-       if (state < PM_SUSPEND_MAX && *s)
+       if (state < PM_SUSPEND_MAX && *s) {
                error = enter_state(state);
                if (error) {
                        suspend_stats.fail++;
                        dpm_save_failed_errno(error);
                } else
                        suspend_stats.success++;
+       }
 #endif
 
  Exit:
index 1455a0d4eedd4b386c759d689f939ba5d7a9007a..7982a0a841eaf082fe929e24a1f4bd5aefd4f015 100644 (file)
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
index 24d04477b2575f0239782cf6287298a47d50f48c..78ab24a7b0e440e2b44d75cc08bc353ab6f939c2 100644 (file)
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
-            child->signal->group_stop_count))
+            child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;
 
+               /*
+                * This is only possible if this thread was cloned by the
+                * traced task running in the stopped group, set the signal
+                * for the future reports.
+                * FIXME: we should change ptrace_init_task() to handle this
+                * case.
+                */
+               if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+                       child->jobctl |= SIGSTOP;
+       }
+
        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
index 0e9344a71be33f6335bf55fd3a7bfe0f52418162..d6b149ccf925c320841e8a42f31fd23b6ee64dc6 100644 (file)
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  *
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  *
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * This waits for either a completion of a specific task to be
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
index 5c9e67923b7cfd7826903c17322c3f0c55de5d74..8a39fa3e3c6c7bafe368bc2e27c607801e8afb87 100644 (file)
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+       long tg_weight;
+
+       /*
+        * Use this CPU's actual weight instead of the last load_contribution
+        * to gain a more accurate current total weight. See
+        * update_cfs_rq_load_contribution().
+        */
+       tg_weight = atomic_read(&tg->load_weight);
+       tg_weight -= cfs_rq->load_contribution;
+       tg_weight += cfs_rq->load.weight;
+
+       return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long load_weight, load, shares;
+       long tg_weight, load, shares;
 
+       tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;
 
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight += load;
-       load_weight -= cfs_rq->load_contribution;
-
        shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
+       if (tg_weight)
+               shares /= tg_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
  * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j                                            (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wl we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)                                           (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
 
-       if (!tg->parent)
+       if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;
 
        for_each_sched_entity(se) {
-               long lw, w;
+               long w, W;
 
                tg = se->my_q->tg;
-               w = se->my_q->load.weight;
 
-               /* use this cpu's instantaneous contribution */
-               lw = atomic_read(&tg->load_weight);
-               lw -= se->my_q->load_contribution;
-               lw += w + wg;
+               /*
+                * W = @wg + \Sum rw_j
+                */
+               W = wg + calc_tg_weight(tg, se->my_q);
 
-               wl += w;
+               /*
+                * w = rw_i + @wl
+                */
+               w = se->my_q->load.weight + wl;
 
-               if (lw > 0 && wl < lw)
-                       wl = (wl * tg->shares) / lw;
+               /*
+                * wl = S * s'_i; see (2)
+                */
+               if (W > 0 && w < W)
+                       wl = (w * tg->shares) / W;
                else
                        wl = tg->shares;
 
-               /* zero point is MIN_SHARES */
+               /*
+                * Per the above, wl is the new se->load.weight value; since
+                * those are clipped to [MIN_SHARES, ...) do so now. See
+                * calc_cfs_shares().
+                */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;
+
+               /*
+                * wl = dw_i = S * (s'_i - s_i); see (3)
+                */
                wl -= se->load.weight;
+
+               /*
+                * Recursively apply this logic to all parent groups to compute
+                * the final effective load change on the root group. Since
+                * only the @tg group gets extra weight, all parent groups can
+                * only redistribute existing shares. @wl is the shift in shares
+                * resulting from this level per the above.
+                */
                wg = 0;
        }
 
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an elegible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
+
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
                        }
-               }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
+       }
+       if (!smt) {
+               smt = 1;
+               goto again;
        }
+done:
        rcu_read_unlock();
 
        return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
index efa0a7b75dde7408e89bd07e5b1a490c4f68ea95..84802245abd2562acad3c0eb734fe48cc5213b8e 100644 (file)
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
index 056cbd2e2a27fea8cb15e76bfc711fe32de03303..583a1368afe6ed7d96879d762f73553b2068e27c 100644 (file)
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
        int more = 0;
 
+       if (!sched_feat(RT_RUNTIME_SHARE))
+               return more;
+
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
index b3f78d09a1053b67f62719a8879f6619ae12253b..206551563cce5e9c09796c32674d634f8802d69b 100644 (file)
@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
-               else
-                       WARN_ON_ONCE(!current->ptrace);
 
                sig->group_stop_count = 0;
 
index 6318b511afa10b3044c7bbd1eb49b28f7c490fcc..a650694883a180e93c5ec1d6414e45ba904fcff3 100644 (file)
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
index c436e790b21bf7cd89878eb9d92146e1b1c511d6..8a46f5d64504f15dcaf31ec4f5fcee7ea15a8bdf 100644 (file)
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index cf52fda2e0966d005e152ff8747d5d44db5fbd7a..d3ad022136e56b97f7b51ec1a3fa018889a4944b 100644 (file)
@@ -491,6 +491,22 @@ void clocksource_touch_watchdog(void)
        clocksource_resume_watchdog();
 }
 
+/**
+ * clocksource_max_adjustment- Returns max adjustment amount
+ * @cs:         Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+       u64 ret;
+       /*
+        * We won't try to correct for more then 11% adjustments (110,000 ppm),
+        */
+       ret = (u64)cs->mult * 11;
+       do_div(ret,100);
+       return (u32)ret;
+}
+
 /**
  * clocksource_max_deferment - Returns max time the clocksource can be deferred
  * @cs:         Pointer to clocksource
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
        /*
         * Calculate the maximum number of cycles that we can pass to the
         * cyc2ns function without overflowing a 64-bit signed result. The
-        * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
-        * is equivalent to the below.
-        * max_cycles < (2^63)/cs->mult
-        * max_cycles < 2^(log2((2^63)/cs->mult))
-        * max_cycles < 2^(log2(2^63) - log2(cs->mult))
-        * max_cycles < 2^(63 - log2(cs->mult))
-        * max_cycles < 1 << (63 - log2(cs->mult))
+        * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+        * which is equivalent to the below.
+        * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+        * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+        * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+        * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+        * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
         * Please note that we add 1 to the result of the log2 to account for
         * any rounding errors, ensure the above inequality is satisfied and
         * no overflow will occur.
         */
-       max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+       max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
        /*
         * The actual maximum number of cycles we can defer the clocksource is
         * determined by the minimum of max_cycles and cs->mask.
+        * Note: Here we subtract the maxadj to make sure we don't sleep for
+        * too long if there's a large negative adjustment.
         */
        max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-       max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+       max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+                                       cs->shift);
 
        /*
         * To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -628,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
        u64 sec;
-
        /*
         * Calc the maximum number of seconds which we can run before
         * wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
@@ -661,13 +679,27 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 
        clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
                               NSEC_PER_SEC / scale, sec * scale);
+
+       /*
+        * for clocksources that have large mults, to avoid overflow.
+        * Since mult may be adjusted by ntp, add an extra safety margin
+        *
+        */
+       cs->maxadj = clocksource_max_adjustment(cs);
+       while ((cs->mult + cs->maxadj < cs->mult)
+               || (cs->mult - cs->maxadj > cs->mult)) {
+               cs->mult >>= 1;
+               cs->shift--;
+               cs->maxadj = clocksource_max_adjustment(cs);
+       }
+
        cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -695,12 +727,18 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
 int clocksource_register(struct clocksource *cs)
 {
+       /* calculate max adjustment for given mult/shift */
+       cs->maxadj = clocksource_max_adjustment(cs);
+       WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+               "Clocksource %s might overflow on 11%% adjustment\n",
+               cs->name);
+
        /* calculate max idle time permitted for this clocksource */
        cs->max_idle_ns = clocksource_max_deferment(cs);
 
@@ -723,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:                clocksource to be changed
+ * @rating:    new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -734,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:        clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -749,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -769,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       name of override clocksource
  * @count:     length of buffer
  *
@@ -804,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
index f954282d9a82758acf951960392e37b5b07c6014..fd4a7b1625a20b35fad917817e136fdbd2d48e62 100644 (file)
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
index 2b021b0e8507e7e4f9951780c88877bb2d4d56bd..237841378c031ef0f2fa6c492559e695a111b74f 100644 (file)
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
                secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
                nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
                nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
        /*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
                *ts = xtime;
                tomono = wall_to_monotonic;
                nsecs = timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
 
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
        s64 error, interval = timekeeper.cycle_interval;
        int adj;
 
+       /*
+        * The point of this is to check if the error is greater than half
+        * an interval.
+        *
+        * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+        *
+        * Note we subtract one in the shift, so that error is really error*2.
+        * This "saves" dividing (shifting) interval twice, but keeps the
+        * (error > interval) comparison as still measuring if error is
+        * larger than half an interval.
+        *
+        * Note: It does not "save" on aggravation when reading the code.
+        */
        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
+               /*
+                * We now divide error by 4(via shift), which checks if
+                * the error is greater than twice the interval.
+                * If it is greater, we need a bigadjust, if its smaller,
+                * we can adjust by 1.
+                */
                error >>= 2;
+               /*
+                * XXX - In update_wall_time, we round up to the next
+                * nanosecond, and store the amount rounded up into
+                * the error. This causes the likely below to be unlikely.
+                *
+                * The proper fix is to avoid rounding up by using
+                * the high precision timekeeper.xtime_nsec instead of
+                * xtime.tv_nsec everywhere. Fixing this will take some
+                * time.
+                */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
+               /* See comment above, this is just switched for the negative */
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
-       } else
+       } else /* No adjustment needed */
                return;
 
+       WARN_ONCE(timekeeper.clock->maxadj &&
+                       (timekeeper.mult + adj > timekeeper.clock->mult +
+                                               timekeeper.clock->maxadj),
+                       "Adjusting %s more then 11%% (%ld vs %ld)\n",
+                       timekeeper.clock->name, (long)timekeeper.mult + adj,
+                       (long)timekeeper.clock->mult +
+                               timekeeper.clock->maxadj);
+       /*
+        * So the following can be confusing.
+        *
+        * To keep things simple, lets assume adj == 1 for now.
+        *
+        * When adj != 1, remember that the interval and offset values
+        * have been appropriately scaled so the math is the same.
+        *
+        * The basic idea here is that we're increasing the multiplier
+        * by one, this causes the xtime_interval to be incremented by
+        * one cycle_interval. This is because:
+        *      xtime_interval = cycle_interval * mult
+        * So if mult is being incremented by one:
+        *      xtime_interval = cycle_interval * (mult + 1)
+        * Its the same as:
+        *      xtime_interval = (cycle_interval * mult) + cycle_interval
+        * Which can be shortened to:
+        *      xtime_interval += cycle_interval
+        *
+        * So offset stores the non-accumulated cycles. Thus the current
+        * time (in shifted nanoseconds) is:
+        *      now = (offset * adj) + xtime_nsec
+        * Now, even though we're adjusting the clock frequency, we have
+        * to keep time consistent. In other words, we can't jump back
+        * in time, and we also want to avoid jumping forward in time.
+        *
+        * So given the same offset value, we need the time to be the same
+        * both before and after the freq adjustment.
+        *      now = (offset * adj_1) + xtime_nsec_1
+        *      now = (offset * adj_2) + xtime_nsec_2
+        * So:
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * adj_2) + xtime_nsec_2
+        * And we know:
+        *      adj_2 = adj_1 + 1
+        * So:
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * (adj_1+1)) + xtime_nsec_2
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * adj_1) + offset + xtime_nsec_2
+        * Canceling the sides:
+        *      xtime_nsec_1 = offset + xtime_nsec_2
+        * Which gives us:
+        *      xtime_nsec_2 = xtime_nsec_1 - offset
+        * Which simplifies to:
+        *      xtime_nsec -= offset
+        *
+        * XXX - TODO: Doc ntp_error calculation.
+        */
        timekeeper.mult += adj;
        timekeeper.xtime_interval += interval;
        timekeeper.xtime_nsec -= offset;
index dbaa62422b13c057754252d986f97440ec7dd3af..9c3c62b0c4bc89ebd307ff79950039021d157861 100644 (file)
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
index 900b409543db10cfc46b9da703f463a1cee9b78e..b1e8943fed1d3a9fd61916527c59c70d57af7d2c 100644 (file)
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
        ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
 
        /*
index 581876f9f3872e9103a0110893396652599a1d6a..c212a7f934ec4841d8c9887ecf93f6a0b7526b27 100644 (file)
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
index 816d3d074979306713836d9447382cb641aecb54..95dc31efd6dd503dbd169a159dac4c5048f5a568 100644 (file)
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       goto fail;
+                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+               else
+                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
+               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+                       continue;
+
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
-               call->filter = NULL;
+               RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
index 32f3e5ae2be543c59ae3e50682ca504010eb87a3..63b5782732ed5344f2c2826d298fac7dbd76c127 100644 (file)
@@ -244,6 +244,9 @@ config CPU_RMAP
        bool
        depends on SMP
 
+config DQL
+       bool
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
index a4da283f5dc0182a45d370fd76884a86fa37ac42..ff00d4dcb7ed392291b96f91e87afe1bfbbefe3a 100644 (file)
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
 
index 74c6c7fce74900ec9870a15313268c87f9e1ecc3..fea790a2b17659e9b701987101db7929bdfbe8df 100644 (file)
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-       return ((a->dev_addr == a->dev_addr) &&
+       return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
 }
 
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644 (file)
index 0000000..3d1bdcd
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Dynamic byte queue limits.  See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/dynamic_queue_limits.h>
+
+#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+       unsigned int inprogress, prev_inprogress, limit;
+       unsigned int ovlimit, all_prev_completed, completed;
+
+       /* Can't complete more than what's in queue */
+       BUG_ON(count > dql->num_queued - dql->num_completed);
+
+       completed = dql->num_completed + count;
+       limit = dql->limit;
+       ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+       inprogress = dql->num_queued - completed;
+       prev_inprogress = dql->prev_num_queued - dql->num_completed;
+       all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+
+       if ((ovlimit && !inprogress) ||
+           (dql->prev_ovlimit && all_prev_completed)) {
+               /*
+                * Queue considered starved if:
+                *   - The queue was over-limit in the last interval,
+                *     and there is no more data in the queue.
+                *  OR
+                *   - The queue was over-limit in the previous interval and
+                *     when enqueuing it was possible that all queued data
+                *     had been consumed.  This covers the case when queue
+                *     may have become starved between completion processing
+                *     running and next time enqueue was scheduled.
+                *
+                *     When queue is starved increase the limit by the amount
+                *     of bytes both sent and completed in the last interval,
+                *     plus any previous over-limit.
+                */
+               limit += POSDIFF(completed, dql->prev_num_queued) +
+                    dql->prev_ovlimit;
+               dql->slack_start_time = jiffies;
+               dql->lowest_slack = UINT_MAX;
+       } else if (inprogress && prev_inprogress && !all_prev_completed) {
+               /*
+                * Queue was not starved, check if the limit can be decreased.
+                * A decrease is only considered if the queue has been busy in
+                * the whole interval (the check above).
+                *
+                * If there is slack, the amount of excess data queued above
+                * the amount needed to prevent starvation, the queue limit
+                * can be decreased.  To avoid hysteresis we consider the
+                * minimum amount of slack found over several iterations of the
+                * completion routine.
+                */
+               unsigned int slack, slack_last_objs;
+
+               /*
+                * Slack is the maximum of
+                *   - The queue limit plus previous over-limit minus twice
+                *     the number of objects completed.  Note that two times
+                *     number of completed bytes is a basis for an upper bound
+                *     of the limit.
+                *   - Portion of objects in the last queuing operation that
+                *     was not part of non-zero previous over-limit.  That is
+                *     "round down" by non-overlimit portion of the last
+                *     queueing operation.
+                */
+               slack = POSDIFF(limit + dql->prev_ovlimit,
+                   2 * (completed - dql->num_completed));
+               slack_last_objs = dql->prev_ovlimit ?
+                   POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+               slack = max(slack, slack_last_objs);
+
+               if (slack < dql->lowest_slack)
+                       dql->lowest_slack = slack;
+
+               if (time_after(jiffies,
+                              dql->slack_start_time + dql->slack_hold_time)) {
+                       limit = POSDIFF(limit, dql->lowest_slack);
+                       dql->slack_start_time = jiffies;
+                       dql->lowest_slack = UINT_MAX;
+               }
+       }
+
+       /* Enforce bounds on limit */
+       limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+       if (limit != dql->limit) {
+               dql->limit = limit;
+               ovlimit = 0;
+       }
+
+       dql->adj_limit = limit + completed;
+       dql->prev_ovlimit = ovlimit;
+       dql->prev_last_obj_cnt = dql->last_obj_cnt;
+       dql->num_completed = completed;
+       dql->prev_num_queued = dql->num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
+
+void dql_reset(struct dql *dql)
+{
+       /* Reset all dynamic values */
+       dql->limit = 0;
+       dql->num_queued = 0;
+       dql->num_completed = 0;
+       dql->last_obj_cnt = 0;
+       dql->prev_num_queued = 0;
+       dql->prev_last_obj_cnt = 0;
+       dql->prev_ovlimit = 0;
+       dql->lowest_slack = UINT_MAX;
+       dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+int dql_init(struct dql *dql, unsigned hold_time)
+{
+       dql->max_limit = DQL_MAX_LIMIT;
+       dql->min_limit = 0;
+       dql->slack_hold_time = hold_time;
+       dql_reset(dql);
+       return 0;
+}
+EXPORT_SYMBOL(dql_init);
index 6a3bd48fa2a06e500d6695ac936a6ff4143775e4..75510e94f7d0b2fcd5736a4e0aeb9ff282da11d1 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
+#include <linux/export.h>
 
 u32 reciprocal_value(u32 k)
 {
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k)
        do_div(val, k);
        return (u32)val;
 }
+EXPORT_SYMBOL(reciprocal_value);
index 993599e66e5a91bf4a5ff22565b4a7560651128c..8e75003d62f632c52b8c0ea56ca62bdc72e560f0 100644 (file)
@@ -777,6 +777,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
+static
+char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+                     struct printf_spec spec)
+{
+       spec.flags |= SPECIAL | SMALL | ZEROPAD;
+       if (spec.field_width == -1)
+               spec.field_width = 2 + 2 * sizeof(netdev_features_t);
+       spec.base = 16;
+
+       return number(buf, end, *(const netdev_features_t *)addr, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -824,6 +836,7 @@ int kptr_restrict __read_mostly;
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
  * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'NF' For a netdev_features_t
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -896,6 +909,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                       has_capability_noaudit(current, CAP_SYSLOG))))
                        ptr = NULL;
                break;
+       case 'N':
+               switch (fmt[1]) {
+               case 'F':
+                       return netdev_feature_string(buf, end, ptr, spec);
+               }
+               break;
        }
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
index a0860640378d87b40827bdb4a89ec271c5c8f35c..71034f41a2ba92ca17c902e2f9b75d568294987f 100644 (file)
@@ -724,6 +724,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
        bdi_unregister(bdi);
 
+       /*
+        * If bdi_unregister() had already been called earlier, the
+        * wakeup_timer could still be armed because bdi_prune_sb()
+        * can race with the bdi_wakeup_thread_delayed() calls from
+        * __mark_inode_dirty().
+        */
+       del_timer_sync(&bdi->wb.wakeup_timer);
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);
 
index c0018f2d50e04e2ea03045989b742254be0a8489..5f0a3c91fdac043437392bbe141b45c30cdcf66e 100644 (file)
@@ -1828,7 +1828,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
@@ -2407,7 +2404,6 @@ static ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
-
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@ again:
                written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
-
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
        } while (iov_iter_count(i));
 
        return written ? written : status;
index 4298abaae153033caafe1f8ac641fdef1dee097e..36b3d988b4ef6ac8c263ee0732c1d08513afb04f 100644 (file)
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
 #ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
index dae27ba3be2c8523cd032ad330e2f5a32fbffaf3..2316840b337a37447d2d8cbe294f53df71e53575 100644 (file)
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -900,7 +901,6 @@ retry:
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -914,6 +914,7 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
@@ -2422,6 +2423,8 @@ retry_avoidcopy:
         * anon_vma prepared.
         */
        if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
index 6aff93c98acaf6020eabd37f2e9b49a04135f187..94da8ee9e2c2a4b88eb3f58ae341f16435da701e 100644 (file)
@@ -50,6 +50,8 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -286,6 +288,10 @@ struct mem_cgroup {
         */
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+       struct tcp_memcontrol tcp_mem;
+#endif
 };
 
 /* Stuffs for move charges at task migration. */
@@ -365,7 +371,58 @@ enum charge_type {
 
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+       /* A socket spends its whole life in the same cgroup */
+       if (sk->sk_cgrp) {
+               WARN_ON(1);
+               return;
+       }
+       if (static_branch(&memcg_socket_limit_enabled)) {
+               struct mem_cgroup *memcg;
+
+               BUG_ON(!sk->sk_prot->proto_cgroup);
+
+               rcu_read_lock();
+               memcg = mem_cgroup_from_task(current);
+               if (!mem_cgroup_is_root(memcg)) {
+                       mem_cgroup_get(memcg);
+                       sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+               }
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+       if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+               struct mem_cgroup *memcg;
+               WARN_ON(!sk->sk_cgrp->memcg);
+               memcg = sk->sk_cgrp->memcg;
+               mem_cgroup_put(memcg);
+       }
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+       if (!memcg || mem_cgroup_is_root(memcg))
+               return NULL;
+
+       return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -745,7 +802,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
        preempt_enable();
 }
 
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
@@ -4612,6 +4669,36 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       /*
+        * Part of this would be better living in a separate allocation
+        * function, leaving us with just the cgroup tree population work.
+        * We, however, depend on state such as network's proto_list that
+        * is only initialized after cgroup creation. I found the less
+        * cumbersome way to deal with it to defer it all to populate time
+        */
+       return mem_cgroup_sockets_init(cont, ss);
+};
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+       mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+}
+#endif
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4843,12 +4930,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg)
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
        if (!memcg->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(memcg->res.parent, res);
 }
+EXPORT_SYMBOL(parent_mem_cgroup);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -4907,9 +4995,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
-               root_mem_cgroup = memcg;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               root_mem_cgroup = memcg;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
@@ -4948,7 +5036,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &memcg->css;
 free_out:
        __mem_cgroup_free(memcg);
-       root_mem_cgroup = NULL;
        return ERR_PTR(error);
 }
 
@@ -4965,6 +5052,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       kmem_cgroup_destroy(ss, cont);
+
        mem_cgroup_put(memcg);
 }
 
@@ -4978,6 +5067,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 
        if (!ret)
                ret = register_memsw_files(cont, ss);
+
+       if (!ret)
+               ret = register_kmem_files(cont, ss);
+
        return ret;
 }
 
index adc395481813532efe82dced222e79115b075b0e..c3fdbcb17658ce405131e5b0310e1857fd6558bc 100644 (file)
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
+       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        if (!vma || vma->vm_start > start)
                return -EFAULT;
 
+       if (start > vma->vm_start)
+               prev = vma;
+
        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
+               if (mpol_equal(vma_policy(vma), new_pol))
+                       continue;
+
+               pgoff = vma->vm_pgoff +
+                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 vma->anon_vma, vma->vm_file, pgoff,
                                  new_pol);
                if (prev) {
                        vma = prev;
index 578e29174fa6a0b84e8cbb10f8bf22ffd9a37b57..177aca424a069ac1ae1b44d48a8e6d992cd42a4d 100644 (file)
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
-out:
        unlock_page(hpage);
 
+out:
        if (rc != -EAGAIN) {
                list_del(&hpage->lru);
                put_page(hpage);
index 73419c55eda6ddbbdfb3ece3e6d6e477ebd78600..b982290fd962dc2745c423d44e7b92f9c1db247a 100644 (file)
@@ -454,7 +454,7 @@ void  __attribute__((weak)) vmalloc_sync_all(void)
  *     between processes, it syncs the pagetable across all
  *     processes.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
        BUG();
        return NULL;
index 471dedb463ab30f9e57d793c5367cac966098c06..069b64e521fccf2725199550653bfdb7055d6f81 100644 (file)
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                      const nodemask_t *nodemask, unsigned long totalpages)
 {
-       int points;
+       long points;
 
        if (oom_unkillable_task(p, mem, nodemask))
                return 0;
@@ -185,6 +185,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
        if (!p)
                return 0;
 
+       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+               task_unlock(p);
+               return 0;
+       }
+
        /*
         * The memory controller may have a limit of 0 bytes, so avoid a divide
         * by zero, if necessary.
index a3278f005230a07ac5bf42717ae55bfb114f0239..50f08241f9815668d73b1adfcd5c9585f11bbc15 100644 (file)
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
  *
  */
 static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
 
 /*
  * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
 {
        int shift = calc_period_shift();
        prop_change_shift(&vm_completions, shift);
-       prop_change_shift(&vm_dirties, shift);
 
        writeback_set_ratelimit();
 }
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-void task_dirty_inc(struct task_struct *tsk)
-{
-       prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -418,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In the other normal situations, it acts more gently by throttling the tasks
+ * more (rather than completely block them) when the bdi dirty pages go high.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
@@ -601,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         */
        if (unlikely(bdi_thresh > thresh))
                bdi_thresh = thresh;
+       /*
+        * It's very possible that bdi_thresh is close to 0 not because the
+        * device is slow, but that it has remained inactive for long time.
+        * Honour such devices a reasonable good (hopefully IO efficient)
+        * threshold, so that the occasional writes won't be blocked and active
+        * writes can rampup the threshold quickly.
+        */
        bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
        /*
         * scale global setpoint to bdi's:
@@ -984,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
         *
         * 8 serves as the safety ratio.
         */
-       if (bdi_dirty)
-               t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+       t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
        /*
         * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1133,17 +1137,30 @@ pause:
                                          pages_dirtied,
                                          pause,
                                          start_time);
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(TASK_KILLABLE);
                io_schedule_timeout(pause);
 
-               dirty_thresh = hard_dirty_limit(dirty_thresh);
                /*
-                * max-pause area. If dirty exceeded but still within this
-                * area, no need to sleep for more than 200ms: (a) 8 pages per
-                * 200ms is typically more than enough to curb heavy dirtiers;
-                * (b) the pause time limit makes the dirtiers more responsive.
+                * This is typically equal to (nr_dirty < dirty_thresh) and can
+                * also keep "1000+ dd on a slow USB stick" under control.
                 */
-               if (nr_dirty < dirty_thresh)
+               if (task_ratelimit)
+                       break;
+
+               /*
+                * In the case of an unresponding NFS server and the NFS dirty
+                * pages exceeds dirty_thresh, give the other good bdi's a pipe
+                * to go through, so that tasks on them still remain responsive.
+                *
+                * In theory 1 page is enough to keep the comsumer-producer
+                * pipe going: the flusher cleans 1 page => the task dirties 1
+                * more page. However bdi_dirty has accounting errors.  So use
+                * the larger and more IO friendly bdi_stat_error.
+                */
+               if (bdi_dirty <= bdi_stat_error(bdi))
+                       break;
+
+               if (fatal_signal_pending(current))
                        break;
        }
 
@@ -1395,7 +1412,6 @@ void __init page_writeback_init(void)
 
        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
-       prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -1724,7 +1740,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
-               task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
 }
index 9dd443d89d8be665813bbeb4e17e54fafde46428..2b8ba3aebf6e2c6b46b0d12dfea058ee3ab022fe 100644 (file)
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
index ea534960a04bcda7e87a18cacf67bd2eb1fc5fe0..12a48a88c0d8cb00dd55b2adcea3e536493dee1a 100644 (file)
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
 
        if (!pages || !bitmap) {
                if (may_alloc && !pages)
-                       pages = pcpu_mem_alloc(pages_size);
+                       pages = pcpu_mem_zalloc(pages_size);
                if (may_alloc && !bitmap)
-                       bitmap = pcpu_mem_alloc(bitmap_size);
+                       bitmap = pcpu_mem_zalloc(bitmap_size);
                if (!pages || !bitmap)
                        return NULL;
        }
 
-       memset(pages, 0, pages_size);
        bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
 
        *bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
 {
        flush_cache_vunmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
 {
        flush_tlb_kernel_range(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
 {
        flush_cache_vmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 /**
index bf80e55dbed7e66fcd28f6dbc46c86fbd222a008..716eb4acf2fc29cf9500ad3972eb00b5413ab446 100644 (file)
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
 void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
             (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
 
 /**
- * pcpu_mem_alloc - allocate memory
+ * pcpu_mem_zalloc - allocate memory
  * @size: bytes to allocate
  *
  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vmalloc() is used.  The returned
+ * kzalloc() is used; otherwise, vzalloc() is used.  The returned
  * memory is always zeroed.
  *
  * CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_mem_alloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size)
 {
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
  * @ptr: memory to free
  * @size: size of the area
  *
- * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
+ * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
  */
 static void pcpu_mem_free(void *ptr, size_t size)
 {
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;
 
-       new = pcpu_mem_alloc(new_size);
+       new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;
 
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 {
        struct pcpu_chunk *chunk;
 
-       chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
+       chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
        if (!chunk)
                return NULL;
 
-       chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+       chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+                                               sizeof(chunk->map[0]));
        if (!chunk->map) {
                kfree(chunk);
                return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
  * address.  The caller is responsible for ensuring @addr stays valid
  * until this function finishes.
  *
+ * percpu allocator has special setup for the first chunk, which currently
+ * supports either embedding in linear address space or vmalloc mapping,
+ * and, from the second one, the backing allocator (currently either vm or
+ * km) provides translation.
+ *
+ * The addr can be tranlated simply without checking if it falls into the
+ * first chunk. But the current code reflects better how percpu allocator
+ * actually works, and the verification can discover both bugs in percpu
+ * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
+ * code.
+ *
  * RETURNS:
  * The physical address for @addr.
  */
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        bool in_first_chunk = false;
-       unsigned long first_start, first_end;
+       unsigned long first_low, first_high;
        unsigned int cpu;
 
        /*
-        * The following test on first_start/end isn't strictly
+        * The following test on unit_low/high isn't strictly
         * necessary but will speed up lookups of addresses which
         * aren't in the first chunk.
         */
-       first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
-       first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
-                                   pcpu_unit_pages);
-       if ((unsigned long)addr >= first_start &&
-           (unsigned long)addr < first_end) {
+       first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+       first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+                                    pcpu_unit_pages);
+       if ((unsigned long)addr >= first_low &&
+           (unsigned long)addr < first_high) {
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(base, cpu);
 
@@ -1011,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
@@ -1233,7 +1247,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
-       pcpu_first_unit_cpu = NR_CPUS;
+
+       pcpu_low_unit_cpu = NR_CPUS;
+       pcpu_high_unit_cpu = NR_CPUS;
 
        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
                const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1269,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-                       if (pcpu_first_unit_cpu == NR_CPUS)
-                               pcpu_first_unit_cpu = cpu;
-                       pcpu_last_unit_cpu = cpu;
+                       /* determine low/high unit_cpu */
+                       if (pcpu_low_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+                               pcpu_low_unit_cpu = cpu;
+                       if (pcpu_high_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+                               pcpu_high_unit_cpu = cpu;
                }
        }
        pcpu_nr_units = unit;
@@ -1889,7 +1909,7 @@ void __init percpu_init_late(void)
 
                BUILD_BUG_ON(size > PAGE_SIZE);
 
-               map = pcpu_mem_alloc(size);
+               map = pcpu_mem_zalloc(size);
                BUG_ON(!map);
 
                spin_lock_irqsave(&pcpu_lock, flags);
index 708efe886154da626cfe01f4a2b6eaf055ada914..83311c9aaf9de0ad8494f04703c248f155cfc2c1 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
        PARTIAL_AC,
        PARTIAL_L3,
        EARLY,
+       LATE,
        FULL
 } g_cpucache_up;
 
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
 {
        struct cache_sizes *s = malloc_sizes;
 
-       if (g_cpucache_up != FULL)
+       if (g_cpucache_up < LATE)
                return;
 
        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
 {
        struct kmem_cache *cachep;
 
+       g_cpucache_up = LATE;
+
        /* Annotate slab for lockdep -- annotate the malloc caches */
        init_lock_keys();
 
index 7d2a996c307e4306bd233f4ae340a02d6915ffb1..ed3334d9b6da77f64796f1933f7672284b159fc2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 {
        struct kmem_cache_node *n = NULL;
        struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-       struct page *page;
+       struct page *page, *discard_page = NULL;
 
        while ((page = c->partial)) {
                enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
                                if (l == M_PARTIAL)
                                        remove_partial(n, page);
                                else
-                                       add_partial(n, page, 1);
+                                       add_partial(n, page,
+                                               DEACTIVATE_TO_TAIL);
 
                                l = m;
                        }
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
                                "unfreezing slab"));
 
                if (m == M_FREE) {
-                       stat(s, DEACTIVATE_EMPTY);
-                       discard_slab(s, page);
-                       stat(s, FREE_SLAB);
+                       page->next = discard_page;
+                       discard_page = page;
                }
        }
 
        if (n)
                spin_unlock(&n->list_lock);
+
+       while (discard_page) {
+               page = discard_page;
+               discard_page = discard_page->next;
+
+               stat(s, DEACTIVATE_EMPTY);
+               discard_slab(s, page);
+               stat(s, FREE_SLAB);
+       }
 }
 
 /*
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->pobjects = pobjects;
                page->next = oldpage;
 
-       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
        stat(s, CPU_PARTIAL_FREE);
        return pobjects;
 }
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       int node = ACCESS_ONCE(c->node);
                        struct page *page;
 
-                       if (!c || c->node < 0)
+                       if (node < 0)
                                continue;
-
-                       if (c->page) {
-                                       if (flags & SO_TOTAL)
-                                               x = c->page->objects;
+                       page = ACCESS_ONCE(c->page);
+                       if (page) {
+                               if (flags & SO_TOTAL)
+                                       x = page->objects;
                                else if (flags & SO_OBJECTS)
-                                       x = c->page->inuse;
+                                       x = page->inuse;
                                else
                                        x = 1;
 
                                total += x;
-                               nodes[c->node] += x;
+                               nodes[node] += x;
                        }
                        page = c->partial;
 
                        if (page) {
                                x = page->pobjects;
-                                total += x;
-                                nodes[c->node] += x;
+                               total += x;
+                               nodes[node] += x;
                        }
-                       per_cpu[c->node]++;
+                       per_cpu[node]++;
                }
        }
 
index b669aa6f6caff34f41fac15a527a791576f9026a..27be2f0d4cb707b4c817175a4c4452e915fc1aa4 100644 (file)
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-       static struct vmap_area *va;
+       struct vmap_area *va;
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
 
        /*
         * In this function, newly allocated vm_struct is not added
@@ -2141,23 +2143,30 @@ void  __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-       /* apply_to_page_range() does all the hard work. */
+       pte_t ***p = data;
+
+       if (p) {
+               *(*p) = pte;
+               (*p)++;
+       }
        return 0;
 }
 
 /**
  *     alloc_vm_area - allocate a range of kernel address space
  *     @size:          size of the area
+ *     @ptes:          returns the PTEs for the address space
  *
  *     Returns:        NULL on failure, vm_struct on success
  *
  *     This function reserves a range of kernel address space, and
  *     allocates pagetables to map that range.  No actual mappings
- *     are created.  If the kernel address space is not shared
- *     between processes, it syncs the pagetable across all
- *     processes.
+ *     are created.
+ *
+ *     If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ *     allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
        struct vm_struct *area;
 
@@ -2171,19 +2180,11 @@ struct vm_struct *alloc_vm_area(size_t size)
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-                               area->size, f, NULL)) {
+                               size, f, ptes ? &ptes : NULL)) {
                free_vm_area(area);
                return NULL;
        }
 
-       /*
-        * If the allocated address space is passed to a hypercall
-        * before being used then we cannot rely on a page fault to
-        * trigger an update of the page tables.  So sync all the page
-        * tables here.
-        */
-       vmalloc_sync_all();
-
        return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
index a1893c050795b6e92f36a71cd0f5dd985f334a2c..f54a05b7a61d9eb658562b191996beb9d4cea397 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
-               unsigned long total_scan;
-               unsigned long max_pass;
+               long total_scan;
+               long max_pass;
                int shrink_ret = 0;
                long nr;
                long new_nr;
                long batch_size = shrinker->batch ? shrinker->batch
                                                  : SHRINK_BATCH;
 
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               if (max_pass <= 0)
+                       continue;
+
                /*
                 * copy the current shrinker scan count into a local variable
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
-               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
index 5471628d3ffe73fd0f8b3cc8d3504cfeae78019c..efea35b02e7ff989745c53844b06ab190d0fcc6a 100644 (file)
@@ -51,27 +51,6 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static void vlan_group_free(struct vlan_group *grp)
-{
-       int i;
-
-       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-               kfree(grp->vlan_devices_arrays[i]);
-       kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
-       struct vlan_group *grp;
-
-       grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
-       if (!grp)
-               return NULL;
-
-       grp->real_dev = real_dev;
-       return grp;
-}
-
 static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
        struct net_device **array;
@@ -92,32 +71,29 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
        return 0;
 }
 
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
-       vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
+       struct vlan_info *vlan_info;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;
 
        ASSERT_RTNL();
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       BUG_ON(!grp);
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
 
        /* Take it out of our own structures, but be sure to interlock with
         * HW accelerating devices or SW vlan input packet processing if
         * VLAN is not 0 (leave it there for 802.1p).
         */
-       if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
-               ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+       if (vlan_id)
+               vlan_vid_del(real_dev, vlan_id);
 
-       grp->nr_vlans--;
+       grp->nr_vlan_devs--;
 
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);
@@ -129,16 +105,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
         */
        unregister_netdevice_queue(dev, head);
 
-       /* If the group is now empty, kill off the group. */
-       if (grp->nr_vlans == 0) {
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
 
-               RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&grp->rcu, vlan_rcu_free);
-       }
-
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
 }
@@ -167,21 +136,26 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 
 int register_vlan_dev(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
        u16 vlan_id = vlan->vlan_id;
-       struct vlan_group *grp, *ngrp = NULL;
+       struct vlan_info *vlan_info;
+       struct vlan_group *grp;
        int err;
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       if (!grp) {
-               ngrp = grp = vlan_group_alloc(real_dev);
-               if (!grp)
-                       return -ENOBUFS;
+       err = vlan_vid_add(real_dev, vlan_id);
+       if (err)
+               return err;
+
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       /* vlan_info should be there now. vlan_vid_add took care of it */
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
+       if (grp->nr_vlan_devs == 0) {
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
-                       goto out_free_group;
+                       goto out_vid_del;
        }
 
        err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -192,7 +166,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_applicant;
 
-       /* Account for reference in struct vlan_dev_info */
+       /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
 
        netif_stacked_transfer_operstate(real_dev, dev);
@@ -202,24 +176,15 @@ int register_vlan_dev(struct net_device *dev)
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan_id, dev);
-       grp->nr_vlans++;
-
-       if (ngrp) {
-               rcu_assign_pointer(real_dev->vlgrp, ngrp);
-       }
-       if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
-               ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+       grp->nr_vlan_devs++;
 
        return 0;
 
 out_uninit_applicant:
-       if (ngrp)
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
-       if (ngrp) {
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&ngrp->rcu, vlan_rcu_free);
-       }
+out_vid_del:
+       vlan_vid_del(real_dev, vlan_id);
        return err;
 }
 
@@ -267,7 +232,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
@@ -278,10 +243,10 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
         */
        new_dev->mtu = real_dev->mtu;
 
-       vlan_dev_info(new_dev)->vlan_id = vlan_id;
-       vlan_dev_info(new_dev)->real_dev = real_dev;
-       vlan_dev_info(new_dev)->dent = NULL;
-       vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+       vlan_dev_priv(new_dev)->vlan_id = vlan_id;
+       vlan_dev_priv(new_dev)->real_dev = real_dev;
+       vlan_dev_priv(new_dev)->dent = NULL;
+       vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
 
        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
@@ -298,7 +263,7 @@ out_free_newdev:
 static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
        /* May be called without an actual change */
        if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
@@ -360,25 +325,26 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct vlan_group *grp;
+       struct vlan_info *vlan_info;
        int i, flgs;
        struct net_device *vlandev;
-       struct vlan_dev_info *vlan;
+       struct vlan_dev_priv *vlan;
        LIST_HEAD(list);
 
        if (is_vlan_dev(dev))
                __vlan_device_event(dev, event);
 
        if ((event == NETDEV_UP) &&
-           (dev->features & NETIF_F_HW_VLAN_FILTER) &&
-           dev->netdev_ops->ndo_vlan_rx_add_vid) {
+           (dev->features & NETIF_F_HW_VLAN_FILTER)) {
                pr_info("adding VLAN 0 to HW filter on device %s\n",
                        dev->name);
-               dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+               vlan_vid_add(dev, 0);
        }
 
-       grp = rtnl_dereference(dev->vlgrp);
-       if (!grp)
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
                goto out;
+       grp = &vlan_info->grp;
 
        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
@@ -447,7 +413,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!(flgs & IFF_UP))
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -465,7 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (flgs & IFF_UP)
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -482,9 +448,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!vlandev)
                                continue;
 
-                       /* unregistration of last vlan destroys group, abort
+                       /* removal of last vid destroys vlan_info, abort
                         * afterwards */
-                       if (grp->nr_vlans == 1)
+                       if (vlan_info->nr_vids == 1)
                                i = VLAN_N_VID;
 
                        unregister_vlan_dev(vlandev, &list);
index 9fd45f3571f9a7c8b0ba0a9431578a0a8b0b674f..a4886d94c40c453cf0032cc24a207dacb438c054 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/if_vlan.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/list.h>
 
 
 /**
@@ -40,8 +41,10 @@ struct vlan_pcpu_stats {
        u32                     tx_dropped;
 };
 
+struct netpoll;
+
 /**
- *     struct vlan_dev_info - VLAN private device data
+ *     struct vlan_dev_priv - VLAN private device data
  *     @nr_ingress_mappings: number of ingress priority mappings
  *     @ingress_priority_map: ingress priority mappings
  *     @nr_egress_mappings: number of egress priority mappings
@@ -53,7 +56,7 @@ struct vlan_pcpu_stats {
  *     @dent: proc dir entry
  *     @vlan_pcpu_stats: ptr to percpu rx stats
  */
-struct vlan_dev_info {
+struct vlan_dev_priv {
        unsigned int                            nr_ingress_mappings;
        u32                                     ingress_priority_map[8];
        unsigned int                            nr_egress_mappings;
@@ -67,13 +70,39 @@ struct vlan_dev_info {
 
        struct proc_dir_entry                   *dent;
        struct vlan_pcpu_stats __percpu         *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll                          *netpoll;
+#endif
 };
 
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 {
        return netdev_priv(dev);
 }
 
+/* if this changes, algorithm will have to be reworked because this
+ * depends on completely exhausting the VLAN identifier space.  Thus
+ * it gives constant time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
+#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+       unsigned int            nr_vlan_devs;
+       struct hlist_node       hlist;  /* linked list */
+       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+       struct net_device       *real_dev; /* The ethernet(like) device
+                                           * the vlan is attached to.
+                                           */
+       struct vlan_group       grp;
+       struct list_head        vid_list;
+       unsigned int            nr_vids;
+       struct rcu_head         rcu;
+};
+
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
                                                       u16 vlan_id)
 {
@@ -97,10 +126,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
                                               u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp)
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info)
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
 
        return NULL;
 }
@@ -121,7 +150,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
                                            u16 vlan_tci)
 {
-       struct vlan_dev_info *vip = vlan_dev_info(dev);
+       struct vlan_dev_priv *vip = vlan_dev_priv(dev);
 
        return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
index f5ffc02729d60396aac6abbdcbfc49bf1ebf20e1..4d39d802be2cb48dab7825ca4c2591c4fec013bb 100644 (file)
@@ -36,7 +36,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
                        skb->pkt_type = PACKET_HOST;
        }
 
-       if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
 
                /*
@@ -55,7 +55,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
-       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
 
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
@@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
                                        u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp) {
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info) {
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
        } else {
                /*
                 * Bonding slaves do not have grp assigned to themselves.
@@ -90,13 +90,13 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->real_dev;
+       return vlan_dev_priv(dev)->real_dev;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->vlan_id;
+       return vlan_dev_priv(dev)->vlan_id;
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
@@ -110,39 +110,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
        return skb;
 }
 
-static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
-{
-       __be16 proto;
-       unsigned char *rawp;
-
-       /*
-        * Was a VLAN packet, grab the encapsulated protocol, which the layer
-        * three protocols care about.
-        */
-
-       proto = vhdr->h_vlan_encapsulated_proto;
-       if (ntohs(proto) >= 1536) {
-               skb->protocol = proto;
-               return;
-       }
-
-       rawp = skb->data;
-       if (*(unsigned short *) rawp == 0xFFFF)
-               /*
-                * This is a magic hack to spot IPX packets. Older Novell
-                * breaks the protocol design and runs IPX over 802.3 without
-                * an 802.2 LLC layer. We look for FFFF which isn't a used
-                * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-                * but does for the rest.
-                */
-               skb->protocol = htons(ETH_P_802_3);
-       else
-               /*
-                * Real 802.2 LLC
-                */
-               skb->protocol = htons(ETH_P_802_2);
-}
-
 struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
        struct vlan_hdr *vhdr;
@@ -179,3 +146,226 @@ err_free:
        kfree_skb(skb);
        return NULL;
 }
+
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+       int i;
+
+       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+               kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+       vlan_group_free(&vlan_info->grp);
+       kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+       vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+       struct vlan_info *vlan_info;
+
+       vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+       if (!vlan_info)
+               return NULL;
+
+       vlan_info->real_dev = dev;
+       INIT_LIST_HEAD(&vlan_info->vid_list);
+       return vlan_info;
+}
+
+struct vlan_vid_info {
+       struct list_head list;
+       unsigned short vid;
+       int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+                                              unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               if (vid_info->vid == vid)
+                       return vid_info;
+       }
+       return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+       if (!vid_info)
+               return NULL;
+       vid_info->vid = vid;
+
+       return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+                         struct vlan_vid_info **pvid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       struct vlan_vid_info *vid_info;
+       int err;
+
+       vid_info = vlan_vid_info_alloc(vid);
+       if (!vid_info)
+               return -ENOMEM;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+           ops->ndo_vlan_rx_add_vid) {
+               err =  ops->ndo_vlan_rx_add_vid(dev, vid);
+               if (err) {
+                       kfree(vid_info);
+                       return err;
+               }
+       }
+       list_add(&vid_info->list, &vlan_info->vid_list);
+       vlan_info->nr_vids++;
+       *pvid_info = vid_info;
+       return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+       bool vlan_info_created = false;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info) {
+               vlan_info = vlan_info_alloc(dev);
+               if (!vlan_info)
+                       return -ENOMEM;
+               vlan_info_created = true;
+       }
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info) {
+               err = __vlan_vid_add(vlan_info, vid, &vid_info);
+               if (err)
+                       goto out_free_vlan_info;
+       }
+       vid_info->refcount++;
+
+       if (vlan_info_created)
+               rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+       return 0;
+
+out_free_vlan_info:
+       if (vlan_info_created)
+               kfree(vlan_info);
+       return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+                          struct vlan_vid_info *vid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       unsigned short vid = vid_info->vid;
+       int err;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+            ops->ndo_vlan_rx_kill_vid) {
+               err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+               if (err) {
+                       pr_warn("failed to kill vid %d for device %s\n",
+                               vid, dev->name);
+               }
+       }
+       list_del(&vid_info->list);
+       kfree(vid_info);
+       vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info)
+               return;
+       vid_info->refcount--;
+       if (vid_info->refcount == 0) {
+               __vlan_vid_del(vlan_info, vid_info);
+               if (vlan_info->nr_vids == 0) {
+                       RCU_INIT_POINTER(dev->vlan_info, NULL);
+                       call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+               }
+       }
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+                        const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return 0;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               err = vlan_vid_add(dev, vid_info->vid);
+               if (err)
+                       goto unwind;
+       }
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(vid_info,
+                                            &vlan_info->vid_list,
+                                            list) {
+               vlan_vid_del(dev, vid_info->vid);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+                         const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+               vlan_vid_del(dev, vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
index bc252862458385ef5d37367a643fcdf9ef5ad9af..9988d4abb372b16a4c70995a047a87548aead7e4 100644 (file)
@@ -33,6 +33,7 @@
 #include "vlan.h"
 #include "vlanproc.h"
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 
 /*
  *     Rebuild the Ethernet MAC header. This is called after an ARP
@@ -72,7 +73,7 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
        struct vlan_priority_tci_mapping *mp;
 
-       mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+       mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
        while (mp) {
                if (mp->priority == skb->priority) {
                        return mp->vlan_qos; /* This should already be shifted
@@ -103,10 +104,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
        u16 vlan_tci = 0;
        int rc;
 
-       if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                vhdr->h_vlan_TCI = htons(vlan_tci);
 
@@ -129,7 +130,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                saddr = dev->dev_addr;
 
        /* Now make the underlying real hard header */
-       dev = vlan_dev_info(dev)->real_dev;
+       dev = vlan_dev_priv(dev)->real_dev;
        rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
        if (rc > 0)
                rc += vhdrlen;
@@ -149,27 +150,29 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
        if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-           vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+           vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
                u16 vlan_tci;
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
 
-       skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+       skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
        len = skb->len;
+       if (netpoll_tx_running(dev))
+               return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
        ret = dev_queue_xmit(skb);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct vlan_pcpu_stats *stats;
 
-               stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+               stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        } else {
-               this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+               this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
        }
 
        return ret;
@@ -180,7 +183,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
        /* TODO: gotta make sure the underlying layer can handle it,
         * maybe an IFF_VLAN_CAPABLE flag for devices?
         */
-       if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
@@ -191,7 +194,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
                                   u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
                vlan->nr_ingress_mappings--;
@@ -204,7 +207,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
 int vlan_dev_set_egress_priority(const struct net_device *dev,
                                 u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *mp = NULL;
        struct vlan_priority_tci_mapping *np;
        u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
@@ -241,7 +244,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
 int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        u32 old_flags = vlan->flags;
 
        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
@@ -261,12 +264,12 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 {
-       strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+       strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
 static int vlan_dev_open(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        int err;
 
@@ -313,7 +316,7 @@ out:
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
 
        dev_mc_unsync(real_dev, dev);
@@ -332,7 +335,7 @@ static int vlan_dev_stop(struct net_device *dev)
 
 static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        struct sockaddr *addr = p;
        int err;
 
@@ -358,7 +361,7 @@ out:
 
 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        struct ifreq ifrr;
        int err = -EOPNOTSUPP;
@@ -383,7 +386,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int err = 0;
 
@@ -397,7 +400,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
                                   struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -409,7 +412,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 
 static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int len = 0;
 
@@ -421,7 +424,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 
 static int vlan_dev_fcoe_enable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -432,7 +435,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev)
 
 static int vlan_dev_fcoe_disable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -443,7 +446,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
 
 static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -455,7 +458,7 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
                                    struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -468,7 +471,7 @@ static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 
        if (dev->flags & IFF_UP) {
                if (change & IFF_ALLMULTI)
@@ -480,8 +483,8 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-       dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -519,7 +522,7 @@ static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        int subclass = 0;
 
        netif_carrier_off(dev);
@@ -568,8 +571,8 @@ static int vlan_dev_init(struct net_device *dev)
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
-       if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+       vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -578,7 +581,7 @@ static int vlan_dev_init(struct net_device *dev)
 static void vlan_dev_uninit(struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
        free_percpu(vlan->vlan_pcpu_stats);
@@ -591,18 +594,17 @@ static void vlan_dev_uninit(struct net_device *dev)
        }
 }
 
-static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        u32 old_features = features;
 
-       features &= real_dev->features;
        features &= real_dev->vlan_features;
+       features |= NETIF_F_RXCSUM;
+       features &= real_dev->features;
 
        features |= old_features & NETIF_F_SOFT_FEATURES;
-
-       if (dev_ethtool_get_rx_csum(real_dev))
-               features |= NETIF_F_RXCSUM;
        features |= NETIF_F_LLTX;
 
        return features;
@@ -611,7 +613,7 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 static int vlan_ethtool_get_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return __ethtool_get_settings(vlan->real_dev, cmd);
 }
@@ -627,7 +629,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
-       if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+       if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
                struct vlan_pcpu_stats *p;
                u32 rx_errors = 0, tx_dropped = 0;
                int i;
@@ -636,7 +638,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
                        u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
                        unsigned int start;
 
-                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+                       p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rxpackets       = p->rx_packets;
@@ -661,6 +663,57 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
        return stats;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+       return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct net_device *real_dev = info->real_dev;
+       struct netpoll *netpoll;
+       int err = 0;
+
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!netpoll)
+               goto out;
+
+       netpoll->dev = real_dev;
+       strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
+
+       err = __netpoll_setup(netpoll);
+       if (err) {
+               kfree(netpoll);
+               goto out;
+       }
+
+       info->netpoll = netpoll;
+
+out:
+       return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct netpoll *netpoll = info->netpoll;
+
+       if (!netpoll)
+               return;
+
+       info->netpoll = NULL;
+
+        /* Wait for transmitting packets to finish before freeing. */
+        synchronize_rcu_bh();
+
+        __netpoll_cleanup(netpoll);
+        kfree(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct ethtool_ops vlan_ethtool_ops = {
        .get_settings           = vlan_ethtool_get_settings,
        .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -688,6 +741,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = vlan_dev_poll_controller,
+       .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
+       .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
 };
index 061ceceeef1212bba5efe802bfc280c351732eef..6f9755352760c7494ad91eb46f3f43df3f181bdb 100644 (file)
@@ -29,7 +29,7 @@ static struct garp_application vlan_gvrp_app __read_mostly = {
 
 int vlan_gvrp_request_join(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
@@ -38,7 +38,7 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 
 void vlan_gvrp_request_leave(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
index 235c2197dbb642d4bf6155f7a0975f7940cce908..50711368ad6a964726de2b284f84ca7d77468759 100644 (file)
@@ -105,7 +105,7 @@ static int vlan_changelink(struct net_device *dev,
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
        int err;
 
@@ -149,7 +149,7 @@ static inline size_t vlan_qos_map_size(unsigned int n)
 
 static size_t vlan_get_size(const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return nla_total_size(2) +      /* IFLA_VLAN_ID */
               sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
@@ -159,14 +159,14 @@ static size_t vlan_get_size(const struct net_device *dev)
 
 static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *pm;
        struct ifla_vlan_flags f;
        struct ifla_vlan_qos_mapping m;
        struct nlattr *nest;
        unsigned int i;
 
-       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
        if (vlan->flags) {
                f.flags = vlan->flags;
                f.mask  = ~0;
@@ -218,7 +218,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .kind           = "vlan",
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
-       .priv_size      = sizeof(struct vlan_dev_info),
+       .priv_size      = sizeof(struct vlan_dev_priv),
        .setup          = vlan_setup,
        .validate       = vlan_validate,
        .newlink        = vlan_newlink,
index d34b6daf89300671dd8cf049a2c1753664f90770..c718fd3664b60e1bbe1c9e43f284a5f2bd4c982e 100644 (file)
@@ -168,13 +168,13 @@ err:
 
 int vlan_proc_add_dev(struct net_device *vlandev)
 {
-       struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
-       dev_info->dent =
+       vlan->dent =
                proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
                                 vn->proc_vlan_dir, &vlandev_fops, vlandev);
-       if (!dev_info->dent)
+       if (!vlan->dent)
                return -ENOBUFS;
        return 0;
 }
@@ -187,10 +187,10 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
        /** NOTE:  This will consume the memory pointed to by dent, it seems. */
-       if (vlan_dev_info(vlandev)->dent) {
-               remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+       if (vlan_dev_priv(vlandev)->dent) {
+               remove_proc_entry(vlan_dev_priv(vlandev)->dent->name,
                                  vn->proc_vlan_dir);
-               vlan_dev_info(vlandev)->dent = NULL;
+               vlan_dev_priv(vlandev)->dent = NULL;
        }
        return 0;
 }
@@ -268,10 +268,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
                           nmtype ? nmtype :  "UNKNOWN");
        } else {
                const struct net_device *vlandev = v;
-               const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+               const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
                seq_printf(seq, "%-15s| %d  | %s\n",  vlandev->name,
-                          dev_info->vlan_id,    dev_info->real_dev->name);
+                          vlan->vlan_id,    vlan->real_dev->name);
        }
        return 0;
 }
@@ -279,7 +279,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
 static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
        struct net_device *vlandev = (struct net_device *) seq->private;
-       const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats;
        static const char fmt64[] = "%30s %12llu\n";
@@ -291,8 +291,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        stats = dev_get_stats(vlandev, &temp);
        seq_printf(seq,
                   "%s  VID: %d  REORDER_HDR: %i  dev->priv_flags: %hx\n",
-                  vlandev->name, dev_info->vlan_id,
-                  (int)(dev_info->flags & 1), vlandev->priv_flags);
+                  vlandev->name, vlan->vlan_id,
+                  (int)(vlan->flags & 1), vlandev->priv_flags);
 
        seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
        seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -300,23 +300,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        seq_puts(seq, "\n");
        seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
        seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-       seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+       seq_printf(seq, "Device: %s", vlan->real_dev->name);
        /* now show all PRIORITY mappings relating to this VLAN */
        seq_printf(seq, "\nINGRESS priority mappings: "
                        "0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
-                  dev_info->ingress_priority_map[0],
-                  dev_info->ingress_priority_map[1],
-                  dev_info->ingress_priority_map[2],
-                  dev_info->ingress_priority_map[3],
-                  dev_info->ingress_priority_map[4],
-                  dev_info->ingress_priority_map[5],
-                  dev_info->ingress_priority_map[6],
-                  dev_info->ingress_priority_map[7]);
+                  vlan->ingress_priority_map[0],
+                  vlan->ingress_priority_map[1],
+                  vlan->ingress_priority_map[2],
+                  vlan->ingress_priority_map[3],
+                  vlan->ingress_priority_map[4],
+                  vlan->ingress_priority_map[5],
+                  vlan->ingress_priority_map[6],
+                  vlan->ingress_priority_map[7]);
 
        seq_printf(seq, " EGRESS priority mappings: ");
        for (i = 0; i < 16; i++) {
                const struct vlan_priority_tci_mapping *mp
-                       = dev_info->egress_priority_map[i];
+                       = vlan->egress_priority_map[i];
                while (mp) {
                        seq_printf(seq, "%u:%hu ",
                                   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
index a07314844238f4c19fbc4d82d99c547ed9f1f907..e07272d0bb2deddc70c287c8ff82563d08054798 100644 (file)
@@ -215,6 +215,7 @@ source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
+source "net/openvswitch/Kconfig"
 
 config RPS
        boolean
@@ -232,6 +233,19 @@ config XPS
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
+config NETPRIO_CGROUP
+       tristate "Network priority cgroup"
+       depends on CGROUPS
+       ---help---
+         Cgroup subsystem for use in assigning processes to network priorities on
+         a per-interface basis
+
+config BQL
+       boolean
+       depends on SYSFS
+       select DQL
+       default y
+
 config HAVE_BPF_JIT
        bool
 
index acdde4950de428ea0312c789c0f673a31d4c4bd8..ad432fa4d9341fc9f5e7442ba6504d878cc820c5 100644 (file)
@@ -69,3 +69,4 @@ obj-$(CONFIG_DNS_RESOLVER)    += dns_resolver/
 obj-$(CONFIG_CEPH_LIB)         += ceph/
 obj-$(CONFIG_BATMAN_ADV)       += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
+obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
index f41f02656ff41a18adefdcf33423b15c1aed785f..876fbe83e2e4db5ded907ea9938c435edc059e0c 100644 (file)
@@ -26,7 +26,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
                                 gfp_t gfp_flags)
 {
        struct sock *sk = sk_atm(vcc);
-       int guess = atm_guess_pdu2truesize(pdu_size);
+       int guess = SKB_TRUESIZE(pdu_size);
 
        atm_force_charge(vcc, guess);
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
index d07223c834af2a6b2bc45606d64d0093f30d9897..353fccf1cde3ffec2e344fe58e0471bccdf2fb50 100644 (file)
@@ -53,6 +53,7 @@ static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
 static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
 static const unsigned char llc_oui_pid_pad[] =
                        { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
+static const unsigned char pad[] = { PAD_BRIDGED };
 static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
 static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
 
@@ -202,7 +203,10 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 {
        struct br2684_dev *brdev = BRPRIV(dev);
        struct atm_vcc *atmvcc;
-       int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
+       int minheadroom = (brvcc->encaps == e_llc) ?
+               ((brdev->payload == p_bridged) ?
+                       sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
+               ((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
 
        if (skb_headroom(skb) < minheadroom) {
                struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
@@ -450,7 +454,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
                        skb->pkt_type = PACKET_HOST;
                } else { /* p_bridged */
                        /* first 2 chars should be 0 */
-                       if (*((u16 *) (skb->data)) != 0)
+                       if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
                                goto error;
                        skb_pull(skb, BR2684_PAD_LEN);
                        skb->protocol = eth_type_trans(skb, net_dev);
@@ -489,15 +493,11 @@ free_skb:
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
-       struct sk_buff_head queue;
-       int err;
        struct br2684_vcc *brvcc;
-       struct sk_buff *skb, *tmp;
-       struct sk_buff_head *rq;
        struct br2684_dev *brdev;
        struct net_device *net_dev;
        struct atm_backend_br2684 be;
-       unsigned long flags;
+       int err;
 
        if (copy_from_user(&be, arg, sizeof be))
                return -EFAULT;
@@ -550,23 +550,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
        atmvcc->push = br2684_push;
        atmvcc->pop = br2684_pop;
 
-       __skb_queue_head_init(&queue);
-       rq = &sk_atm(atmvcc)->sk_receive_queue;
-
-       spin_lock_irqsave(&rq->lock, flags);
-       skb_queue_splice_init(rq, &queue);
-       spin_unlock_irqrestore(&rq->lock, flags);
-
-       skb_queue_walk_safe(&queue, skb, tmp) {
-               struct net_device *dev;
-
-               br2684_push(atmvcc, skb);
-               dev = skb->dev;
-
-               dev->stats.rx_bytes -= skb->len;
-               dev->stats.rx_packets--;
-       }
-
        /* initialize netdev carrier state */
        if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
                netif_carrier_off(net_dev);
@@ -574,6 +557,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
                netif_carrier_on(net_dev);
 
        __module_get(THIS_MODULE);
+
+       /* re-process everything received between connection setup and
+          backend setup */
+       vcc_process_recv_queue(atmvcc);
        return 0;
 
 error:
@@ -600,6 +587,7 @@ static void br2684_setup(struct net_device *netdev)
        struct br2684_dev *brdev = BRPRIV(netdev);
 
        ether_setup(netdev);
+       netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
        brdev->net_dev = netdev;
 
        netdev->netdev_ops = &br2684_netdev_ops;
@@ -612,7 +600,7 @@ static void br2684_setup_routed(struct net_device *netdev)
        struct br2684_dev *brdev = BRPRIV(netdev);
 
        brdev->net_dev = netdev;
-       netdev->hard_header_len = 0;
+       netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
        netdev->netdev_ops = &br2684_netdev_ops_routed;
        netdev->addr_len = 0;
        netdev->mtu = 1500;
index 852394072fa151956cec3e819da113024fbb6971..c12c2582457cf80cb2844563196a3473a9d17e00 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <net/route.h> /* for struct rtable and routing */
 #include <net/icmp.h> /* icmp_send */
+#include <net/arp.h>
 #include <linux/param.h> /* for HZ */
 #include <linux/uaccess.h>
 #include <asm/byteorder.h> /* for htons etc. */
@@ -119,7 +120,7 @@ out:
 /* The neighbour entry n->lock is held. */
 static int neigh_check_cb(struct neighbour *n)
 {
-       struct atmarp_entry *entry = NEIGH2ENTRY(n);
+       struct atmarp_entry *entry = neighbour_priv(n);
        struct clip_vcc *cv;
 
        for (cv = entry->vccs; cv; cv = cv->next) {
@@ -189,6 +190,13 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
        struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
 
        pr_debug("\n");
+
+       if (!clip_devs) {
+               atm_return(vcc, skb->truesize);
+               kfree_skb(skb);
+               return;
+       }
+
        if (!skb) {
                pr_debug("removing VCC %p\n", clip_vcc);
                if (clip_vcc->entry)
@@ -255,8 +263,10 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
 
 static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
+       __be32 *ip = (__be32 *) neigh->primary_key;
+
        pr_debug("(neigh %p, skb %p)\n", neigh, skb);
-       to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
+       to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
 }
 
 static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
@@ -277,72 +287,24 @@ static const struct neigh_ops clip_neigh_ops = {
 
 static int clip_constructor(struct neighbour *neigh)
 {
-       struct atmarp_entry *entry = NEIGH2ENTRY(neigh);
-       struct net_device *dev = neigh->dev;
-       struct in_device *in_dev;
-       struct neigh_parms *parms;
+       struct atmarp_entry *entry = neighbour_priv(neigh);
 
-       pr_debug("(neigh %p, entry %p)\n", neigh, entry);
-       neigh->type = inet_addr_type(&init_net, entry->ip);
-       if (neigh->type != RTN_UNICAST)
+       if (neigh->tbl->family != AF_INET)
                return -EINVAL;
 
-       rcu_read_lock();
-       in_dev = __in_dev_get_rcu(dev);
-       if (!in_dev) {
-               rcu_read_unlock();
+       if (neigh->type != RTN_UNICAST)
                return -EINVAL;
-       }
-
-       parms = in_dev->arp_parms;
-       __neigh_parms_put(neigh->parms);
-       neigh->parms = neigh_parms_clone(parms);
-       rcu_read_unlock();
 
+       neigh->nud_state = NUD_NONE;
        neigh->ops = &clip_neigh_ops;
-       neigh->output = neigh->nud_state & NUD_VALID ?
-           neigh->ops->connected_output : neigh->ops->output;
+       neigh->output = neigh->ops->output;
        entry->neigh = neigh;
        entry->vccs = NULL;
        entry->expires = jiffies - 1;
+
        return 0;
 }
 
-static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
-{
-       return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
-}
-
-static struct neigh_table clip_tbl = {
-       .family         = AF_INET,
-       .entry_size     = sizeof(struct neighbour)+sizeof(struct atmarp_entry),
-       .key_len        = 4,
-       .hash           = clip_hash,
-       .constructor    = clip_constructor,
-       .id             = "clip_arp_cache",
-
-       /* parameters are copied from ARP ... */
-       .parms = {
-               .tbl                    = &clip_tbl,
-               .base_reachable_time    = 30 * HZ,
-               .retrans_time           = 1 * HZ,
-               .gc_staletime           = 60 * HZ,
-               .reachable_time         = 30 * HZ,
-               .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
-               .ucast_probes           = 3,
-               .mcast_probes           = 3,
-               .anycast_delay          = 1 * HZ,
-               .proxy_delay            = (8 * HZ) / 10,
-               .proxy_qlen             = 64,
-               .locktime               = 1 * HZ,
-       },
-       .gc_interval    = 30 * HZ,
-       .gc_thresh1     = 128,
-       .gc_thresh2     = 512,
-       .gc_thresh3     = 1024,
-};
-
 /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
 
 /*
@@ -376,28 +338,19 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (!n) {
-#if 0
-               n = clip_find_neighbour(skb_dst(skb), 1);
-               if (!n) {
-                       dev_kfree_skb(skb);     /* lost that one */
-                       dev->stats.tx_dropped++;
-                       return 0;
-               }
-               dst_set_neighbour(dst, n);
-#endif
                pr_err("NO NEIGHBOUR !\n");
                dev_kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       entry = NEIGH2ENTRY(n);
+       entry = neighbour_priv(n);
        if (!entry->vccs) {
                if (time_after(jiffies, entry->expires)) {
                        /* should be resolved */
                        entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
-                       to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
+                       to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
                }
                if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
                        skb_queue_tail(&entry->neigh->arp_queue, skb);
@@ -448,10 +401,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
-       struct sk_buff_head *rq, queue;
        struct clip_vcc *clip_vcc;
-       struct sk_buff *skb, *tmp;
-       unsigned long flags;
 
        if (!vcc->push)
                return -EBADFD;
@@ -472,29 +422,9 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
        vcc->push = clip_push;
        vcc->pop = clip_pop;
 
-       __skb_queue_head_init(&queue);
-       rq = &sk_atm(vcc)->sk_receive_queue;
-
-       spin_lock_irqsave(&rq->lock, flags);
-       skb_queue_splice_init(rq, &queue);
-       spin_unlock_irqrestore(&rq->lock, flags);
-
        /* re-process everything received between connection setup and MKIP */
-       skb_queue_walk_safe(&queue, skb, tmp) {
-               if (!clip_devs) {
-                       atm_return(vcc, skb->truesize);
-                       kfree_skb(skb);
-               } else {
-                       struct net_device *dev = skb->dev;
-                       unsigned int len = skb->len;
-
-                       skb_get(skb);
-                       clip_push(vcc, skb);
-                       dev->stats.rx_packets--;
-                       dev->stats.rx_bytes -= len;
-                       kfree_skb(skb);
-               }
-       }
+       vcc_process_recv_queue(vcc);
+
        return 0;
 }
 
@@ -523,11 +453,11 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
        rt = ip_route_output(&init_net, ip, 0, 1, 0);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
-       neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
+       neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
        ip_rt_put(rt);
        if (!neigh)
                return -ENOMEM;
-       entry = NEIGH2ENTRY(neigh);
+       entry = neighbour_priv(neigh);
        if (entry != clip_vcc->entry) {
                if (!clip_vcc->entry)
                        pr_debug("add\n");
@@ -544,13 +474,15 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
 }
 
 static const struct net_device_ops clip_netdev_ops = {
-       .ndo_start_xmit = clip_start_xmit,
+       .ndo_start_xmit         = clip_start_xmit,
+       .ndo_neigh_construct    = clip_constructor,
 };
 
 static void clip_setup(struct net_device *dev)
 {
        dev->netdev_ops = &clip_netdev_ops;
        dev->type = ARPHRD_ATM;
+       dev->neigh_priv_len = sizeof(struct atmarp_entry);
        dev->hard_header_len = RFC1483LLC_LEN;
        dev->mtu = RFC1626_MTU;
        dev->tx_queue_len = 100;        /* "normal" queue (packets) */
@@ -604,10 +536,8 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
 
-       if (event == NETDEV_UNREGISTER) {
-               neigh_ifdown(&clip_tbl, dev);
+       if (event == NETDEV_UNREGISTER)
                return NOTIFY_DONE;
-       }
 
        /* ignore non-CLIP devices */
        if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
@@ -787,9 +717,10 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
 /* This means the neighbour entry has no attached VCC objects. */
 #define SEQ_NO_VCC_TOKEN       ((void *) 2)
 
-static void atmarp_info(struct seq_file *seq, struct net_device *dev,
+static void atmarp_info(struct seq_file *seq, struct neighbour *n,
                        struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
 {
+       struct net_device *dev = n->dev;
        unsigned long exp;
        char buf[17];
        int svc, llc, off;
@@ -809,8 +740,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
        seq_printf(seq, "%-6s%-4s%-4s%5ld ",
                   dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
 
-       off = scnprintf(buf, sizeof(buf) - 1, "%pI4",
-                       &entry->ip);
+       off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
        while (off < 16)
                buf[off++] = ' ';
        buf[off] = '\0';
@@ -881,14 +811,17 @@ static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
 {
        struct clip_seq_state *state = (struct clip_seq_state *)_state;
 
-       return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
+       if (n->dev->type != ARPHRD_ATM)
+               return NULL;
+
+       return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
 }
 
 static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
 {
        struct clip_seq_state *state = seq->private;
        state->ns.neigh_sub_iter = clip_seq_sub_iter;
-       return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
+       return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
 }
 
 static int clip_seq_show(struct seq_file *seq, void *v)
@@ -900,10 +833,10 @@ static int clip_seq_show(struct seq_file *seq, void *v)
                seq_puts(seq, atm_arp_banner);
        } else {
                struct clip_seq_state *state = seq->private;
-               struct neighbour *n = v;
                struct clip_vcc *vcc = state->vcc;
+               struct neighbour *n = v;
 
-               atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
+               atmarp_info(seq, n, neighbour_priv(n), vcc);
        }
        return 0;
 }
@@ -934,9 +867,6 @@ static void atm_clip_exit_noproc(void);
 
 static int __init atm_clip_init(void)
 {
-       neigh_table_init_no_netlink(&clip_tbl);
-
-       clip_tbl_hook = &clip_tbl;
        register_atm_ioctl(&clip_ioctl_ops);
        register_netdevice_notifier(&clip_dev_notifier);
        register_inetaddr_notifier(&clip_inet_notifier);
@@ -973,12 +903,6 @@ static void atm_clip_exit_noproc(void)
         */
        del_timer_sync(&idle_timer);
 
-       /* Next, purge the table, so that the device
-        * unregister loop below does not hang due to
-        * device references remaining in the table.
-        */
-       neigh_ifdown(&clip_tbl, NULL);
-
        dev = clip_devs;
        while (dev) {
                next = PRIV(dev)->next;
@@ -986,11 +910,6 @@ static void atm_clip_exit_noproc(void)
                free_netdev(dev);
                dev = next;
        }
-
-       /* Now it is safe to fully shutdown whole table. */
-       neigh_table_clear(&clip_tbl);
-
-       clip_tbl_hook = NULL;
 }
 
 static void __exit atm_clip_exit(void)
index 14ff9fe399896c024630c3bced79de195cd5dd54..b4b44dbed645f74046ae4663a31b805a0ed72b52 100644 (file)
@@ -214,6 +214,26 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
 }
 EXPORT_SYMBOL(vcc_release_async);
 
+void vcc_process_recv_queue(struct atm_vcc *vcc)
+{
+       struct sk_buff_head queue, *rq;
+       struct sk_buff *skb, *tmp;
+       unsigned long flags;
+
+       __skb_queue_head_init(&queue);
+       rq = &sk_atm(vcc)->sk_receive_queue;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       skb_queue_splice_init(rq, &queue);
+       spin_unlock_irqrestore(&rq->lock, flags);
+
+       skb_queue_walk_safe(&queue, skb, tmp) {
+               __skb_unlink(skb, &queue);
+               vcc->push(vcc, skb);
+       }
+}
+EXPORT_SYMBOL(vcc_process_recv_queue);
+
 void atm_dev_signal_change(struct atm_dev *dev, char signal)
 {
        pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
@@ -502,8 +522,11 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 
        if (sock->state != SS_CONNECTED)
                return -ENOTCONN;
-       if (flags & ~MSG_DONTWAIT)              /* only handle MSG_DONTWAIT */
+
+       /* only handle MSG_DONTWAIT and MSG_PEEK */
+       if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
                return -EOPNOTSUPP;
+
        vcc = ATM_SD(sock);
        if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
            test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -524,8 +547,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        if (error)
                return error;
        sock_recv_ts_and_drops(msg, sk, skb);
-       pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
-       atm_return(vcc, skb->truesize);
+
+       if (!(flags & MSG_PEEK)) {
+               pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
+                        skb->truesize);
+               atm_return(vcc, skb->truesize);
+       }
+
        skb_free_datagram(sk, skb);
        return copied;
 }
index f48a76b6cdf48845a5834800f6716113c9c95914..cc3c2dae4d793427259e614acfec579d42f0c2a1 100644 (file)
@@ -24,6 +24,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, unsigned int optlen);
 int vcc_getsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, int __user *optlen);
+void vcc_process_recv_queue(struct atm_vcc *vcc);
 
 int atmpvc_init(void);
 void atmpvc_exit(void);
index db4a11c61d15c08462623975d027d1bbb81fa4a2..df35d9a3b5fe91672cf0890d49566ae96e1f11e5 100644 (file)
@@ -303,6 +303,10 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
        atmvcc->push = pppoatm_push;
        atmvcc->pop = pppoatm_pop;
        __module_get(THIS_MODULE);
+
+       /* re-process everything received between connection setup and
+          backend setup */
+       vcc_process_recv_queue(atmvcc);
        return 0;
 }
 
index e7c69f4619ec0f7f90d41d9fbbe7f0caa2b7d52f..3cd0a0dc91cb6b3823271ccd109ab021dc5abef1 100644 (file)
@@ -402,14 +402,14 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
                break;
 
        case AX25_T1:
-               if (ax25_ctl.arg < 1)
+               if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
                        goto einval_put;
                ax25->rtt = (ax25_ctl.arg * HZ) / 2;
                ax25->t1  = ax25_ctl.arg * HZ;
                break;
 
        case AX25_T2:
-               if (ax25_ctl.arg < 1)
+               if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
                        goto einval_put;
                ax25->t2 = ax25_ctl.arg * HZ;
                break;
@@ -422,10 +422,15 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
                break;
 
        case AX25_T3:
+               if (ax25_ctl.arg > ULONG_MAX / HZ)
+                       goto einval_put;
                ax25->t3 = ax25_ctl.arg * HZ;
                break;
 
        case AX25_IDLE:
+               if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
+                       goto einval_put;
+
                ax25->idle = ax25_ctl.arg * 60 * HZ;
                break;
 
@@ -540,15 +545,16 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
        ax25_cb *ax25;
        struct net_device *dev;
        char devname[IFNAMSIZ];
-       int opt, res = 0;
+       unsigned long opt;
+       int res = 0;
 
        if (level != SOL_AX25)
                return -ENOPROTOOPT;
 
-       if (optlen < sizeof(int))
+       if (optlen < sizeof(unsigned int))
                return -EINVAL;
 
-       if (get_user(opt, (int __user *)optval))
+       if (get_user(opt, (unsigned int __user *)optval))
                return -EFAULT;
 
        lock_sock(sk);
@@ -571,7 +577,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T1:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -580,7 +586,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T2:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -596,7 +602,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T3:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -604,7 +610,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_IDLE:
-               if (opt < 0) {
+               if (opt > ULONG_MAX / (60 * HZ)) {
                        res = -EINVAL;
                        break;
                }
@@ -612,7 +618,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_BACKOFF:
-               if (opt < 0 || opt > 2) {
+               if (opt > 2) {
                        res = -EINVAL;
                        break;
                }
index b8a7414c35716ade771f2a18ff2b8e8ffe9ead23..c25492f7d665266897a68d8ac06d9860ca87cbf4 100644 (file)
@@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count,
        unsigned long uint_val;
        int ret;
 
-       ret = strict_strtoul(buff, 10, &uint_val);
+       ret = kstrtoul(buff, 10, &uint_val);
        if (ret) {
                bat_info(net_dev,
                         "%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
        unsigned long val;
        int ret, vis_mode_tmp = -1;
 
-       ret = strict_strtoul(buff, 10, &val);
+       ret = kstrtoul(buff, 10, &val);
 
        if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
            (strncmp(buff, "client", 6) == 0) ||
index 0be9ff346fa047bb0e4380aba74a5a50d5ad113c..9bc63b209b3fd0987c31db60716d70f92cd9e7a5 100644 (file)
@@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
        /* sequence number is much newer, probably missed a lot of packets */
 
        if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
-               || (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+               && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "We missed a lot of packets (%i) !\n",
                        seq_num_diff - 1);
index 619fb73b3b76f834bdd0abad815f69482f9110a9..24403a7350f75fb5767e8855041c31b8d166fea9 100644 (file)
@@ -25,6 +25,7 @@
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
+#include "translation-table.h"
 #include "routing.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -572,108 +573,142 @@ out:
        return ret;
 }
 
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-                struct orig_node *old_gw)
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
        struct ethhdr *ethhdr;
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct udphdr *udphdr;
-       struct gw_node *curr_gw;
-       struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
-       unsigned int header_len = 0;
-       int ret = 1;
-
-       if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
-               return 0;
 
        /* check for ethernet header */
-       if (!pskb_may_pull(skb, header_len + ETH_HLEN))
-               return 0;
+       if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+               return false;
        ethhdr = (struct ethhdr *)skb->data;
-       header_len += ETH_HLEN;
+       *header_len += ETH_HLEN;
 
        /* check for initial vlan header */
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-               if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
-                       return 0;
+               if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+                       return false;
                ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
-               header_len += VLAN_HLEN;
+               *header_len += VLAN_HLEN;
        }
 
        /* check for ip header */
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_IP:
-               if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
-                       return 0;
-               iphdr = (struct iphdr *)(skb->data + header_len);
-               header_len += iphdr->ihl * 4;
+               if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+                       return false;
+               iphdr = (struct iphdr *)(skb->data + *header_len);
+               *header_len += iphdr->ihl * 4;
 
                /* check for udp header */
                if (iphdr->protocol != IPPROTO_UDP)
-                       return 0;
+                       return false;
 
                break;
        case ETH_P_IPV6:
-               if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
-                       return 0;
-               ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
-               header_len += sizeof(*ipv6hdr);
+               if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+                       return false;
+               ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+               *header_len += sizeof(*ipv6hdr);
 
                /* check for udp header */
                if (ipv6hdr->nexthdr != IPPROTO_UDP)
-                       return 0;
+                       return false;
 
                break;
        default:
-               return 0;
+               return false;
        }
 
-       if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
-               return 0;
-       udphdr = (struct udphdr *)(skb->data + header_len);
-       header_len += sizeof(*udphdr);
+       if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+               return false;
+       udphdr = (struct udphdr *)(skb->data + *header_len);
+       *header_len += sizeof(*udphdr);
 
        /* check for bootp port */
        if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
             (ntohs(udphdr->dest) != 67))
-               return 0;
+               return false;
 
        if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
            (ntohs(udphdr->dest) != 547))
-               return 0;
+               return false;
 
-       if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
-               return -1;
+       return true;
+}
 
-       curr_gw = gw_get_selected_gw_node(bat_priv);
-       if (!curr_gw)
-               return 0;
-
-       /* If old_gw != NULL then this packet is unicast.
-        * So, at this point we have to check the message type: if it is a
-        * DHCPREQUEST we have to decide whether to drop it or not */
-       if (old_gw && curr_gw->orig_node != old_gw) {
-               if (is_type_dhcprequest(skb, header_len)) {
-                       /* If the dhcp packet has been sent to a different gw,
-                        * we have to evaluate whether the old gw is still
-                        * reliable enough */
-                       neigh_curr = find_router(bat_priv, curr_gw->orig_node,
-                                                NULL);
-                       neigh_old = find_router(bat_priv, old_gw, NULL);
-                       if (!neigh_curr || !neigh_old)
-                               goto free_neigh;
-                       if (neigh_curr->tq_avg - neigh_old->tq_avg <
-                                                               GW_THRESHOLD)
-                               ret = -1;
-               }
+bool gw_out_of_range(struct bat_priv *bat_priv,
+                    struct sk_buff *skb, struct ethhdr *ethhdr)
+{
+       struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+       struct orig_node *orig_dst_node = NULL;
+       struct gw_node *curr_gw = NULL;
+       bool ret, out_of_range = false;
+       unsigned int header_len = 0;
+       uint8_t curr_tq_avg;
+
+       ret = gw_is_dhcp_target(skb, &header_len);
+       if (!ret)
+               goto out;
+
+       orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
+                                         ethhdr->h_dest);
+       if (!orig_dst_node)
+               goto out;
+
+       if (!orig_dst_node->gw_flags)
+               goto out;
+
+       ret = is_type_dhcprequest(skb, header_len);
+       if (!ret)
+               goto out;
+
+       switch (atomic_read(&bat_priv->gw_mode)) {
+       case GW_MODE_SERVER:
+               /* If we are a GW then we are our best GW. We can artificially
+                * set the tq towards ourself as the maximum value */
+               curr_tq_avg = TQ_MAX_VALUE;
+               break;
+       case GW_MODE_CLIENT:
+               curr_gw = gw_get_selected_gw_node(bat_priv);
+               if (!curr_gw)
+                       goto out;
+
+               /* packet is going to our gateway */
+               if (curr_gw->orig_node == orig_dst_node)
+                       goto out;
+
+               /* If the dhcp packet has been sent to a different gw,
+                * we have to evaluate whether the old gw is still
+                * reliable enough */
+               neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+               if (!neigh_curr)
+                       goto out;
+
+               curr_tq_avg = neigh_curr->tq_avg;
+               break;
+       case GW_MODE_OFF:
+       default:
+               goto out;
        }
-free_neigh:
+
+       neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+       if (!neigh_old)
+               goto out;
+
+       if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+               out_of_range = true;
+
+out:
+       if (orig_dst_node)
+               orig_node_free_ref(orig_dst_node);
+       if (curr_gw)
+               gw_node_free_ref(curr_gw);
        if (neigh_old)
                neigh_node_free_ref(neigh_old);
        if (neigh_curr)
                neigh_node_free_ref(neigh_curr);
-       if (curr_gw)
-               gw_node_free_ref(curr_gw);
-       return ret;
+       return out_of_range;
 }
index b9b983c07feb41a61bff77b596f14c91690c3f4c..e1edba08eb1d8ffdc31ac21fbb63543175aabe7d 100644 (file)
@@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv,
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_purge(struct bat_priv *bat_priv);
 int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-                struct orig_node *old_gw);
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool gw_out_of_range(struct bat_priv *bat_priv,
+                    struct sk_buff *skb, struct ethhdr *ethhdr);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
index 18661af0bc3becbe4cc57ae8e2b9ec8a92d482e0..c4ac7b0a2a63cb641ea86f28347ce6bcc7f35016 100644 (file)
@@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                        *tmp_ptr = '\0';
        }
 
-       ret = strict_strtol(buff, 10, &ldown);
+       ret = kstrtol(buff, 10, &ldown);
        if (ret) {
                bat_err(net_dev,
                        "Download speed of gateway mode invalid: %s\n",
@@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                                *tmp_ptr = '\0';
                }
 
-               ret = strict_strtol(slash_ptr + 1, 10, &lup);
+               ret = kstrtol(slash_ptr + 1, 10, &lup);
                if (ret) {
                        bat_err(net_dev,
                                "Upload speed of gateway mode invalid: "
index 2a172505f5134c4b1618765ec14c7ae8e0209c5c..d1da29da333b35d33185e5bf883a6f5776239361 100644 (file)
@@ -25,7 +25,7 @@
 /* clears the hash */
 static void hash_init(struct hashtable_t *hash)
 {
-       int i;
+       uint32_t i;
 
        for (i = 0 ; i < hash->size; i++) {
                INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash)
 }
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size)
+struct hashtable_t *hash_new(uint32_t size)
 {
        struct hashtable_t *hash;
 
index d20aa71ba1e88e4d7dde1ee01c56642f8417b9b4..4768717f07f9f850ccefd681fa75c93c4d09363a 100644 (file)
@@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
-typedef int (*hashdata_choose_cb)(const void *, int);
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
 typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
        struct hlist_head *table;   /* the hashtable itself with the buckets */
        spinlock_t *list_locks;     /* spinlock for each hash list entry */
-       int size;                   /* size of hashtable */
+       uint32_t size;              /* size of hashtable */
 };
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size);
+struct hashtable_t *hash_new(uint32_t size);
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
@@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash,
        struct hlist_head *head;
        struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock; /* spinlock to protect write access */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash,
                           hashdata_choose_cb choose,
                           const void *data, struct hlist_node *data_node)
 {
-       int index, ret = -1;
+       uint32_t index;
+       int ret = -1;
        struct hlist_head *head;
        struct hlist_node *node;
        spinlock_t *list_lock; /* spinlock to protect write access */
@@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash,
                                hashdata_compare_cb compare,
                                hashdata_choose_cb choose, void *data)
 {
-       size_t index;
+       uint32_t index;
        struct hlist_node *node;
        struct hlist_head *head;
        void *data_save = NULL;
index ac3520e057c0079d97943433a8df648ac1cc402c..d9c1e7bb7fbfa4ba6d5d579bfe1b266cbab65052 100644 (file)
@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
 
        spin_unlock_bh(&socket_client->lock);
 
-       error = __copy_to_user(buf, &socket_packet->icmp_packet,
-                              socket_packet->icmp_len);
+       packet_len = min(count, socket_packet->icmp_len);
+       error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
 
-       packet_len = socket_packet->icmp_len;
        kfree(socket_packet);
 
        if (error)
@@ -187,12 +186,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        skb_reserve(skb, sizeof(struct ethhdr));
        icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
-       if (!access_ok(VERIFY_READ, buff, packet_len)) {
-               len = -EFAULT;
-               goto free_skb;
-       }
-
-       if (__copy_from_user(icmp_packet, buff, packet_len)) {
+       if (copy_from_user(icmp_packet, buff, packet_len)) {
                len = -EFAULT;
                goto free_skb;
        }
@@ -217,7 +211,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 
        if (icmp_packet->version != COMPAT_VERSION) {
                icmp_packet->msg_type = PARAMETER_PROBLEM;
-               icmp_packet->ttl = COMPAT_VERSION;
+               icmp_packet->version = COMPAT_VERSION;
                bat_socket_add_packet(socket_client, icmp_packet, packet_len);
                goto free_skb;
        }
index 964ad4d8ba33afb77fcc5f9148ff779b230174c8..86354e06eb48bceb3d43ea443d7090805cf78e46 100644 (file)
@@ -28,7 +28,7 @@
 #define DRIVER_DEVICE "batman-adv"
 
 #ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index 0e5b77255d9994db9c4cc9fb64f677d82271f3f8..0bc2045a2f2e519118167469f07fa868d6e806ea 100644 (file)
@@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv)
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -413,7 +413,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
        int batman_count = 0;
        int last_seen_secs;
        int last_seen_msecs;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
 
@@ -519,7 +520,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
        struct hlist_node *node;
        struct hlist_head *head;
        struct orig_node *orig_node;
-       int i, ret;
+       uint32_t i;
+       int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
@@ -601,7 +603,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
        struct hlist_head *head;
        struct hard_iface *hard_iface_tmp;
        struct orig_node *orig_node;
-       int i, ret;
+       uint32_t i;
+       int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
index cfc1f60a96a107f6aab51cf9076e3a871c1b947c..67765ffef731d20b544aef7002909820956ef6d0 100644 (file)
@@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
 {
        const unsigned char *key = data;
        uint32_t hash = 0;
index f961cc5eade5e2255489812b975162db4909e2b8..773e606f9702bfcc04b7c41ddf2234790fb205c0 100644 (file)
@@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
        struct hlist_head *head;
        struct orig_node *orig_node;
        unsigned long *word;
-       int i;
+       uint32_t i;
        size_t word_index;
 
        for (i = 0; i < hash->size; i++) {
@@ -578,6 +578,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct tt_query_packet *tt_query;
+       uint16_t tt_len;
        struct ethhdr *ethhdr;
 
        /* drop packet if it has not necessary minimum size */
@@ -616,13 +617,21 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                }
                break;
        case TT_RESPONSE:
-               /* packet needs to be linearized to access the TT changes */
-               if (skb_linearize(skb) < 0)
-                       goto out;
+               if (is_my_mac(tt_query->dst)) {
+                       /* packet needs to be linearized to access the TT
+                        * changes */
+                       if (skb_linearize(skb) < 0)
+                               goto out;
+
+                       tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+                       /* Ensure we have all the claimed data */
+                       if (unlikely(skb_headlen(skb) <
+                                    sizeof(struct tt_query_packet) + tt_len))
+                               goto out;
 
-               if (is_my_mac(tt_query->dst))
                        handle_tt_response(bat_priv, tt_query);
-               else {
+               else {
                        bat_dbg(DBG_TT, bat_priv,
                                "Routing TT_RESPONSE to %pM [%c]\n",
                                tt_query->dst,
index f9cc957289894ea8471f23ae06eb70d4ec6f52d9..987c75a775f9c034c5f6648fc13f05203f1419dc 100644 (file)
@@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        struct bcast_packet *bcast_packet;
        struct vlan_ethhdr *vhdr;
        struct softif_neigh *curr_softif_neigh = NULL;
-       struct orig_node *orig_node = NULL;
+       unsigned int header_len = 0;
        int data_len = skb->len, ret;
        short vid = -1;
-       bool do_bcast;
+       bool do_bcast = false;
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto dropped;
@@ -598,17 +598,28 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        /* Register the client MAC in the transtable */
        tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
-       orig_node = transtable_search(bat_priv, ethhdr->h_source,
-                                     ethhdr->h_dest);
-       do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
-       if (do_bcast || (orig_node && orig_node->gw_flags)) {
-               ret = gw_is_target(bat_priv, skb, orig_node);
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
+               do_bcast = true;
 
-               if (ret < 0)
-                       goto dropped;
-
-               if (ret)
-                       do_bcast = false;
+               switch (atomic_read(&bat_priv->gw_mode)) {
+               case GW_MODE_SERVER:
+                       /* gateway servers should not send dhcp
+                        * requests into the mesh */
+                       ret = gw_is_dhcp_target(skb, &header_len);
+                       if (ret)
+                               goto dropped;
+                       break;
+               case GW_MODE_CLIENT:
+                       /* gateway clients should send dhcp requests
+                        * via unicast to their gateway */
+                       ret = gw_is_dhcp_target(skb, &header_len);
+                       if (ret)
+                               do_bcast = false;
+                       break;
+               case GW_MODE_OFF:
+               default:
+                       break;
+               }
        }
 
        /* ethernet packet should be broadcasted */
@@ -644,6 +655,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 
        /* unicast packet */
        } else {
+               if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+                       ret = gw_out_of_range(bat_priv, skb, ethhdr);
+                       if (ret)
+                               goto dropped;
+               }
+
                ret = unicast_send_skb(skb, bat_priv);
                if (ret != 0)
                        goto dropped_freed;
@@ -662,8 +679,6 @@ end:
                softif_neigh_free_ref(curr_softif_neigh);
        if (primary_if)
                hardif_free_ref(primary_if);
-       if (orig_node)
-               orig_node_free_ref(orig_node);
        return NETDEV_TX_OK;
 }
 
@@ -859,7 +874,7 @@ unreg_debugfs:
 unreg_sysfs:
        sysfs_del_meshif(soft_iface);
 unreg_soft_iface:
-       unregister_netdev(soft_iface);
+       unregister_netdevice(soft_iface);
        return NULL;
 
 free_soft_iface:
index c7aafc7c5ed4854b2a46f6e37b5386ef937fee8e..ab8dea8b0b2e6ae0bcf5f29e2253f0e6aa37b207 100644 (file)
@@ -36,18 +36,9 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
-static int compare_ltt(const struct hlist_node *node, const void *data2)
+static int compare_tt(const struct hlist_node *node, const void *data2)
 {
-       const void *data1 = container_of(node, struct tt_local_entry,
-                                        hash_entry);
-
-       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* returns 1 if they are the same mac addr */
-static int compare_gtt(const struct hlist_node *node, const void *data2)
-{
-       const void *data1 = container_of(node, struct tt_global_entry,
+       const void *data1 = container_of(node, struct tt_common_entry,
                                         hash_entry);
 
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
@@ -60,14 +51,13 @@ static void tt_start_timer(struct bat_priv *bat_priv)
                           msecs_to_jiffies(5000));
 }
 
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-                                                const void *data)
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
+                                           const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
-       int index;
+       struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+       uint32_t index;
 
        if (!hash)
                return NULL;
@@ -76,51 +66,46 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_local_entry, data))
+       hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+               if (!compare_eth(tt_common_entry, data))
                        continue;
 
-               if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+               if (!atomic_inc_not_zero(&tt_common_entry->refcount))
                        continue;
 
-               tt_local_entry_tmp = tt_local_entry;
+               tt_common_entry_tmp = tt_common_entry;
                break;
        }
        rcu_read_unlock();
 
-       return tt_local_entry_tmp;
+       return tt_common_entry_tmp;
 }
 
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
-                                                  const void *data)
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
+                                                const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_global_hash;
-       struct hlist_head *head;
-       struct hlist_node *node;
-       struct tt_global_entry *tt_global_entry;
-       struct tt_global_entry *tt_global_entry_tmp = NULL;
-       int index;
-
-       if (!hash)
-               return NULL;
-
-       index = choose_orig(data, hash->size);
-       head = &hash->table[index];
+       struct tt_common_entry *tt_common_entry;
+       struct tt_local_entry *tt_local_entry = NULL;
 
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_global_entry, data))
-                       continue;
+       tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+       if (tt_common_entry)
+               tt_local_entry = container_of(tt_common_entry,
+                                             struct tt_local_entry, common);
+       return tt_local_entry;
+}
 
-               if (!atomic_inc_not_zero(&tt_global_entry->refcount))
-                       continue;
+static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
+                                                  const void *data)
+{
+       struct tt_common_entry *tt_common_entry;
+       struct tt_global_entry *tt_global_entry = NULL;
 
-               tt_global_entry_tmp = tt_global_entry;
-               break;
-       }
-       rcu_read_unlock();
+       tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+       if (tt_common_entry)
+               tt_global_entry = container_of(tt_common_entry,
+                                              struct tt_global_entry, common);
+       return tt_global_entry;
 
-       return tt_global_entry_tmp;
 }
 
 static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
@@ -133,15 +118,18 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
 
 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
 {
-       if (atomic_dec_and_test(&tt_local_entry->refcount))
-               kfree_rcu(tt_local_entry, rcu);
+       if (atomic_dec_and_test(&tt_local_entry->common.refcount))
+               kfree_rcu(tt_local_entry, common.rcu);
 }
 
 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 {
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
 
-       tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+       tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
 
        if (tt_global_entry->orig_node)
                orig_node_free_ref(tt_global_entry->orig_node);
@@ -151,8 +139,9 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-       if (atomic_dec_and_test(&tt_global_entry->refcount))
-               call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
+       if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+               call_rcu(&tt_global_entry->common.rcu,
+                        tt_global_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +190,7 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
+       int hash_added;
 
        tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
@@ -217,26 +207,33 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
                (uint8_t)atomic_read(&bat_priv->ttvn));
 
-       memcpy(tt_local_entry->addr, addr, ETH_ALEN);
-       tt_local_entry->last_seen = jiffies;
-       tt_local_entry->flags = NO_FLAGS;
+       memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+       tt_local_entry->common.flags = NO_FLAGS;
        if (is_wifi_iface(ifindex))
-               tt_local_entry->flags |= TT_CLIENT_WIFI;
-       atomic_set(&tt_local_entry->refcount, 2);
+               tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+       atomic_set(&tt_local_entry->common.refcount, 2);
+       tt_local_entry->last_seen = jiffies;
 
        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
-               tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+               tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
 
-       tt_local_event(bat_priv, addr, tt_local_entry->flags);
+       hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+                        &tt_local_entry->common,
+                        &tt_local_entry->common.hash_entry);
+
+       if (unlikely(hash_added != 0)) {
+               /* remove the reference for the hash */
+               tt_local_entry_free_ref(tt_local_entry);
+               goto out;
+       }
+
+       tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
 
        /* The local entry has to be marked as NEW to avoid to send it in
         * a full table response going out before the next ttvn increment
         * (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_NEW;
-
-       hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
-                tt_local_entry, &tt_local_entry->hash_entry);
+       tt_local_entry->common.flags |= TT_CLIENT_NEW;
 
        /* remove address from global hash if present */
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
@@ -245,10 +242,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (tt_global_entry) {
                /* This node is probably going to update its tt table */
                tt_global_entry->orig_node->tt_poss_change = true;
-               /* The global entry has to be marked as PENDING and has to be
+               /* The global entry has to be marked as ROAMING and has to be
                 * kept for consistency purpose */
-               tt_global_entry->flags |= TT_CLIENT_PENDING;
-               send_roam_adv(bat_priv, tt_global_entry->addr,
+               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+               tt_global_entry->roam_at = jiffies;
+               send_roam_adv(bat_priv, tt_global_entry->common.addr,
                              tt_global_entry->orig_node);
        }
 out:
@@ -310,13 +308,12 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
-       size_t buf_size, pos;
-       char *buff;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
@@ -337,51 +334,27 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
                   "announced via TT (TTVN: %u):\n",
                   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
 
-       buf_size = 1;
-       /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               __hlist_for_each_rcu(node, head)
-                       buf_size += 29;
-               rcu_read_unlock();
-       }
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       buff[0] = '\0';
-       pos = 0;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       pos += snprintf(buff + pos, 30, " * %pM "
-                                       "[%c%c%c%c%c]\n",
-                                       tt_local_entry->addr,
-                                       (tt_local_entry->flags &
+                       seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+                                       tt_common_entry->addr,
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NOPURGE ? 'P' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NEW ? 'N' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
        }
-
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
 out:
        if (primary_if)
                hardif_free_ref(primary_if);
@@ -392,13 +365,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
                                 struct tt_local_entry *tt_local_entry,
                                 uint16_t flags)
 {
-       tt_local_event(bat_priv, tt_local_entry->addr,
-                      tt_local_entry->flags | flags);
+       tt_local_event(bat_priv, tt_local_entry->common.addr,
+                      tt_local_entry->common.flags | flags);
 
        /* The local client has to be marked as "pending to be removed" but has
         * to be kept in the table in order to send it in a full table
         * response issued before the net ttvn increment (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_PENDING;
+       tt_local_entry->common.flags |= TT_CLIENT_PENDING;
 }
 
 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -414,7 +387,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
                             (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
 
        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
-               "%s\n", tt_local_entry->addr, message);
+               "%s\n", tt_local_entry->common.addr, message);
 out:
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
@@ -424,23 +397,27 @@ static void tt_local_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
+                       if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
                                continue;
 
                        /* entry already marked for deletion */
-                       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+                       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                                continue;
 
                        if (!is_out_of_time(tt_local_entry->last_seen,
@@ -451,7 +428,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
                                             TT_CLIENT_DEL);
                        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
                                "pending to be removed: timed out\n",
-                               tt_local_entry->addr);
+                               tt_local_entry->common.addr);
                }
                spin_unlock_bh(list_lock);
        }
@@ -462,10 +439,11 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       int i;
+       uint32_t i;
 
        if (!bat_priv->tt_local_hash)
                return;
@@ -477,9 +455,12 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -527,6 +508,7 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
        struct tt_global_entry *tt_global_entry;
        struct orig_node *orig_node_tmp;
        int ret = 0;
+       int hash_added;
 
        tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
@@ -537,18 +519,24 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                if (!tt_global_entry)
                        goto out;
 
-               memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+               memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+               tt_global_entry->common.flags = NO_FLAGS;
+               atomic_set(&tt_global_entry->common.refcount, 2);
                /* Assign the new orig_node */
                atomic_inc(&orig_node->refcount);
                tt_global_entry->orig_node = orig_node;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
-               atomic_set(&tt_global_entry->refcount, 2);
 
-               hash_add(bat_priv->tt_global_hash, compare_gtt,
-                        choose_orig, tt_global_entry,
-                        &tt_global_entry->hash_entry);
+               hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+                                choose_orig, &tt_global_entry->common,
+                                &tt_global_entry->common.hash_entry);
+
+               if (unlikely(hash_added != 0)) {
+                       /* remove the reference for the hash */
+                       tt_global_entry_free_ref(tt_global_entry);
+                       goto out_remove;
+               }
                atomic_inc(&orig_node->tt_size);
        } else {
                if (tt_global_entry->orig_node != orig_node) {
@@ -559,20 +547,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                        orig_node_free_ref(orig_node_tmp);
                        atomic_inc(&orig_node->tt_size);
                }
+               tt_global_entry->common.flags = NO_FLAGS;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
        }
 
        if (wifi)
-               tt_global_entry->flags |= TT_CLIENT_WIFI;
+               tt_global_entry->common.flags |= TT_CLIENT_WIFI;
 
        bat_dbg(DBG_TT, bat_priv,
                "Creating new global tt entry: %pM (via %pM)\n",
-               tt_global_entry->addr, orig_node->orig);
+               tt_global_entry->common.addr, orig_node->orig);
 
+out_remove:
        /* remove address from local hash if present */
-       tt_local_remove(bat_priv, tt_global_entry->addr,
+       tt_local_remove(bat_priv, tt_global_entry->common.addr,
                        "global tt received", roaming);
        ret = 1;
 out:
@@ -586,13 +575,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
-       size_t buf_size, pos;
-       char *buff;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
@@ -615,53 +604,32 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
                   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
 
-       buf_size = 1;
-       /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
-        * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               __hlist_for_each_rcu(node, head)
-                       buf_size += 67;
-               rcu_read_unlock();
-       }
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       buff[0] = '\0';
-       pos = 0;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       pos += snprintf(buff + pos, 69,
-                                       " * %pM  (%3u) via %pM     (%3u)   "
-                                       "[%c%c%c]\n", tt_global_entry->addr,
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
+                       seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   "
+                                       "[%c%c%c]\n",
+                                       tt_global_entry->common.addr,
                                        tt_global_entry->ttvn,
                                        tt_global_entry->orig_node->orig,
                                        (uint8_t) atomic_read(
                                                &tt_global_entry->orig_node->
                                                last_ttvn),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
        }
-
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
 out:
        if (primary_if)
                hardif_free_ref(primary_if);
@@ -677,13 +645,13 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 
        bat_dbg(DBG_TT, bat_priv,
                "Deleting global tt entry %pM (via %pM): %s\n",
-               tt_global_entry->addr, tt_global_entry->orig_node->orig,
+               tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
                message);
 
        atomic_dec(&tt_global_entry->orig_node->tt_size);
 
-       hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
-                   tt_global_entry->addr);
+       hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+                   tt_global_entry->common.addr);
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
@@ -694,6 +662,7 @@ void tt_global_del(struct bat_priv *bat_priv,
                   const char *message, bool roaming)
 {
        struct tt_global_entry *tt_global_entry = NULL;
+       struct tt_local_entry *tt_local_entry = NULL;
 
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
        if (!tt_global_entry)
@@ -701,22 +670,37 @@ void tt_global_del(struct bat_priv *bat_priv,
 
        if (tt_global_entry->orig_node == orig_node) {
                if (roaming) {
-                       tt_global_entry->flags |= TT_CLIENT_ROAM;
-                       tt_global_entry->roam_at = jiffies;
-                       goto out;
+                       /* if we are deleting a global entry due to a roam
+                        * event, there are two possibilities:
+                        * 1) the client roamed from node A to node B => we mark
+                        *    it with TT_CLIENT_ROAM, we start a timer and we
+                        *    wait for node B to claim it. In case of timeout
+                        *    the entry is purged.
+                        * 2) the client roamed to us => we can directly delete
+                        *    the global entry, since it is useless now. */
+                       tt_local_entry = tt_local_hash_find(bat_priv,
+                                                           tt_global_entry->common.addr);
+                       if (!tt_local_entry) {
+                               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+                               tt_global_entry->roam_at = jiffies;
+                               goto out;
+                       }
                }
                _tt_global_del(bat_priv, tt_global_entry, message);
        }
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
+       if (tt_local_entry)
+               tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, const char *message)
 {
        struct tt_global_entry *tt_global_entry;
-       int i;
+       struct tt_common_entry *tt_common_entry;
+       uint32_t i;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
@@ -730,14 +714,18 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, safe,
+               hlist_for_each_entry_safe(tt_common_entry, node, safe,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (tt_global_entry->orig_node == orig_node) {
                                bat_dbg(DBG_TT, bat_priv,
                                        "Deleting global tt entry %pM "
-                                       "(via %pM): originator time out\n",
-                                       tt_global_entry->addr,
-                                       tt_global_entry->orig_node->orig);
+                                       "(via %pM): %s\n",
+                                       tt_global_entry->common.addr,
+                                       tt_global_entry->orig_node->orig,
+                                       message);
                                hlist_del_rcu(node);
                                tt_global_entry_free_ref(tt_global_entry);
                        }
@@ -750,20 +738,24 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 static void tt_global_roam_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
+                       if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
                                continue;
                        if (!is_out_of_time(tt_global_entry->roam_at,
                                            TT_CLIENT_ROAM_TIMEOUT * 1000))
@@ -771,7 +763,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting global "
                                "tt entry (%pM): Roaming timeout\n",
-                               tt_global_entry->addr);
+                               tt_global_entry->common.addr);
                        atomic_dec(&tt_global_entry->orig_node->tt_size);
                        hlist_del_rcu(node);
                        tt_global_entry_free_ref(tt_global_entry);
@@ -785,10 +777,11 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       int i;
+       uint32_t i;
 
        if (!bat_priv->tt_global_hash)
                return;
@@ -800,9 +793,12 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        tt_global_entry_free_ref(tt_global_entry);
                }
                spin_unlock_bh(list_lock);
@@ -818,8 +814,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
 {
        bool ret = false;
 
-       if (tt_local_entry->flags & TT_CLIENT_WIFI &&
-           tt_global_entry->flags & TT_CLIENT_WIFI)
+       if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+           tt_global_entry->common.flags & TT_CLIENT_WIFI)
                ret = true;
 
        return ret;
@@ -852,7 +848,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 
        /* A global client marked as PENDING has already moved from that
         * originator */
-       if (tt_global_entry->flags & TT_CLIENT_PENDING)
+       if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
 
        orig_node = tt_global_entry->orig_node;
@@ -871,29 +867,34 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node;
        struct hlist_head *head;
-       int i, j;
+       uint32_t i;
+       int j;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (compare_eth(tt_global_entry->orig_node,
                                        orig_node)) {
                                /* Roaming clients are in the global table for
                                 * consistency only. They don't have to be
                                 * taken into account while computing the
                                 * global crc */
-                               if (tt_global_entry->flags & TT_CLIENT_ROAM)
+                               if (tt_common_entry->flags & TT_CLIENT_ROAM)
                                        continue;
                                total_one = 0;
                                for (j = 0; j < ETH_ALEN; j++)
                                        total_one = crc16_byte(total_one,
-                                               tt_global_entry->addr[j]);
+                                               tt_common_entry->addr[j]);
                                total ^= total_one;
                        }
                }
@@ -908,25 +909,26 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node;
        struct hlist_head *head;
-       int i, j;
+       uint32_t i;
+       int j;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        /* not yet committed clients have not to be taken into
                         * account while computing the CRC */
-                       if (tt_local_entry->flags & TT_CLIENT_NEW)
+                       if (tt_common_entry->flags & TT_CLIENT_NEW)
                                continue;
                        total_one = 0;
                        for (j = 0; j < ETH_ALEN; j++)
                                total_one = crc16_byte(total_one,
-                                                  tt_local_entry->addr[j]);
+                                                  tt_common_entry->addr[j]);
                        total ^= total_one;
                }
                rcu_read_unlock();
@@ -1015,21 +1017,25 @@ unlock:
 /* data_ptr is useless here, but has to be kept to respect the prototype */
 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_local_entry *tt_local_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
 
-       if (tt_local_entry->flags & TT_CLIENT_NEW)
+       if (tt_common_entry->flags & TT_CLIENT_NEW)
                return 0;
        return 1;
 }
 
 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_global_entry *tt_global_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
+       const struct tt_global_entry *tt_global_entry;
        const struct orig_node *orig_node = data_ptr;
 
-       if (tt_global_entry->flags & TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & TT_CLIENT_ROAM)
                return 0;
 
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
+
        return (tt_global_entry->orig_node == orig_node);
 }
 
@@ -1040,7 +1046,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                                                              const void *),
                                              void *cb_data)
 {
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct tt_query_packet *tt_response;
        struct tt_change *tt_change;
        struct hlist_node *node;
@@ -1048,7 +1054,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        struct sk_buff *skb = NULL;
        uint16_t tt_tot, tt_count;
        ssize_t tt_query_size = sizeof(struct tt_query_packet);
-       int i;
+       uint32_t i;
 
        if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
                tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1072,15 +1078,16 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        if (tt_count == tt_tot)
                                break;
 
-                       if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+                       if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
                                continue;
 
-                       memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(tt_change->addr, tt_common_entry->addr,
+                              ETH_ALEN);
                        tt_change->flags = NO_FLAGS;
 
                        tt_count++;
@@ -1187,11 +1194,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
                (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
 
        /* Let's get the orig node of the REAL destination */
-       req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+       req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
        if (!req_dst_orig_node)
                goto out;
 
-       res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+       res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
        if (!res_dst_orig_node)
                goto out;
 
@@ -1317,7 +1324,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
        my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
        req_ttvn = tt_request->ttvn;
 
-       orig_node = get_orig_node(bat_priv, tt_request->src);
+       orig_node = orig_hash_find(bat_priv, tt_request->src);
        if (!orig_node)
                goto out;
 
@@ -1497,7 +1504,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
                goto out;
        /* Check if the client has been logically deleted (but is kept for
         * consistency purpose) */
-       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
        ret = true;
 out:
@@ -1720,45 +1727,53 @@ void tt_free(struct bat_priv *bat_priv)
        kfree(bat_priv->tt_buff);
 }
 
-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function will enable or disable the specified flags for all the entries
+ * in the given hash table and returns the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+                            bool enable)
 {
-       int i;
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
+       uint32_t i;
+       uint16_t changed_num = 0;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
 
        if (!hash)
-               return;
+               goto out;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       if (!(tt_local_entry->flags & flags))
-                               continue;
-                       tt_local_entry->flags &= ~flags;
-                       atomic_inc(&bat_priv->num_local_tt);
+                       if (enable) {
+                               if ((tt_common_entry->flags & flags) == flags)
+                                       continue;
+                               tt_common_entry->flags |= flags;
+                       } else {
+                               if (!(tt_common_entry->flags & flags))
+                                       continue;
+                               tt_common_entry->flags &= ~flags;
+                       }
+                       changed_num++;
                }
                rcu_read_unlock();
        }
-
+out:
+       return changed_num;
 }
 
 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -1768,16 +1783,19 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+                       if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
                                continue;
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
-                               "(%pM): pending\n", tt_local_entry->addr);
+                               "(%pM): pending\n", tt_common_entry->addr);
 
                        atomic_dec(&bat_priv->num_local_tt);
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -1787,7 +1805,11 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 
 void tt_commit_changes(struct bat_priv *bat_priv)
 {
-       tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+       uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+                                           TT_CLIENT_NEW, false);
+       /* all the reset entries have now to be effectively counted as local
+        * entries */
+       atomic_add(changed_num, &bat_priv->num_local_tt);
        tt_local_purge_pending_clients(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
index ab8d0fe6df5a4ed6c16092705e79e23675838d0c..e9eb043719ace347b1ac29142879cf15d218f8f4 100644 (file)
@@ -222,24 +222,24 @@ struct socket_packet {
        struct icmp_packet_rr icmp_packet;
 };
 
-struct tt_local_entry {
+struct tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
-       unsigned long last_seen;
        uint16_t flags;
        atomic_t refcount;
        struct rcu_head rcu;
 };
 
+struct tt_local_entry {
+       struct tt_common_entry common;
+       unsigned long last_seen;
+};
+
 struct tt_global_entry {
-       uint8_t addr[ETH_ALEN];
-       struct hlist_node hash_entry; /* entry in the global table */
+       struct tt_common_entry common;
        struct orig_node *orig_node;
        uint8_t ttvn;
-       uint16_t flags; /* only TT_GLOBAL_ROAM is used */
        unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
-       atomic_t refcount;
-       struct rcu_head rcu;
 };
 
 struct tt_change_node {
index f81a6b668b0c524d33f4f3b7794e2181a1897a9b..cc3b9f2f3b5dde8f524f05d8089fa9adf2384292 100644 (file)
@@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2)
 
 /* hash function to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
 {
        const struct vis_info *vis_info = data;
        const struct vis_packet *packet;
@@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
        struct hlist_head *head;
        struct hlist_node *node;
        struct vis_info *vis_info, *vis_info_tmp = NULL;
-       int index;
+       uint32_t index;
 
        if (!hash)
                return NULL;
@@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        HLIST_HEAD(vis_if_list);
        struct if_list_entry *entry;
        struct hlist_node *pos, *n;
-       int i, j, ret = 0;
+       uint32_t i;
+       int j, ret = 0;
        int vis_server = atomic_read(&bat_priv->vis_mode);
        size_t buff_pos, buf_size;
        char *buff;
@@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
        struct hlist_head *head;
        struct orig_node *orig_node;
        struct vis_packet *packet;
-       int best_tq = -1, i;
+       int best_tq = -1;
+       uint32_t i;
 
        packet = (struct vis_packet *)info->skb_packet->data;
 
@@ -607,8 +609,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        struct vis_info *info = bat_priv->my_vis_info;
        struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
        struct vis_info_entry *entry;
-       struct tt_local_entry *tt_local_entry;
-       int best_tq = -1, i;
+       struct tt_common_entry *tt_common_entry;
+       int best_tq = -1;
+       uint32_t i;
 
        info->first_seen = jiffies;
        packet->vis_type = atomic_read(&bat_priv->vis_mode);
@@ -669,13 +672,13 @@ next:
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node, head,
+               hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
                        memset(entry->src, 0, ETH_ALEN);
-                       memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
                        packet->entries++;
 
@@ -696,7 +699,7 @@ unlock:
  * held */
 static void purge_vis_packets(struct bat_priv *bat_priv)
 {
-       int i;
+       uint32_t i;
        struct hashtable_t *hash = bat_priv->vis_hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -733,7 +736,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
        struct sk_buff *skb;
        struct hard_iface *hard_iface;
        uint8_t dstaddr[ETH_ALEN];
-       int i;
+       uint32_t i;
 
 
        packet = (struct vis_packet *)info->skb_packet->data;
index 42d53b85a80826c3ddc5af51c79a8684293a25f2..a779ec703323ce7293522e3cdb7af1d41b3c58a2 100644 (file)
@@ -56,8 +56,8 @@
 
 #define VERSION "1.3"
 
-static int compress_src = 1;
-static int compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
 
 static LIST_HEAD(bnep_session_list);
 static DECLARE_RWSEM(bnep_session_sem);
index 6d38d80195cb40c43ae6c292a7406e58d64d05b3..845da3ee56a0d3966bdeb5ab627ef3dc49c8dead 100644 (file)
@@ -643,7 +643,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
-                                       msecs_to_jiffies(HCI_INIT_TIMEOUT));
+                                       msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
index 919e3c0e74aa4c61ff3e920bad6c7c0ae5f5b11f..4221bd256bddfd89e147f1605f0dd334aa042ea6 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_le;
+static bool enable_le;
 
 /* Handle HCI Event packets */
 
index 189a667c293bb3d4f53ec2d33e8dd94432201cce..6d94616af3129b3519c914c3458eb30cad40de05 100644 (file)
@@ -49,7 +49,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_mgmt;
+static bool enable_mgmt;
 
 /* ----- HCI socket interface ----- */
 
index cd7bb3d7f2b4de657a997a0362b0c03917ba889f..aa78d8c4b93be75f630e1389b4886ffdd4ab3623 100644 (file)
@@ -57,7 +57,7 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
-int disable_ertm;
+bool disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
index 09a3cbcf794e9cd6297f6a07841381009a44fd9a..501649bf5596d0373d6b6dd3be791dd1345ad33e 100644 (file)
@@ -51,8 +51,8 @@
 
 #define VERSION "1.11"
 
-static int disable_cfc;
-static int l2cap_ertm;
+static bool disable_cfc;
+static bool l2cap_ertm;
 static int channel_mtu = -1;
 static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
 
index 0d59e61d7822fd69790a814cb23b0c09b92b269f..5dc2f2126fac669a2f1b961efb8ce665768da2f2 100644 (file)
@@ -51,7 +51,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/sco.h>
 
-static int disable_esco;
+static bool disable_esco;
 
 static const struct proto_ops sco_sock_ops;
 
index f20c4fd915a8991ef86cd3a76b61b0c8cb08f4bb..ba780cc8e515d2dcd6424d627c286ec1ceb86a47 100644 (file)
@@ -62,7 +62,7 @@ static int __init br_init(void)
 
        brioctl_set(br_ioctl_deviceless_stub);
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
        br_fdb_test_addr_hook = br_fdb_test_addr;
 #endif
 
@@ -93,7 +93,7 @@ static void __exit br_deinit(void)
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
        br_netfilter_fini();
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
        br_fdb_test_addr_hook = NULL;
 #endif
 
index feb77ea7b58ed2308938c4a9bedd8be82394c464..71773b014e0ce5a05db8c3acdaa2d0b680314415 100644 (file)
@@ -170,8 +170,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
                return -EINVAL;
 
        spin_lock_bh(&br->lock);
-       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-       br_stp_change_bridge_id(br, addr->sa_data);
+       if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+               memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+               br_fdb_change_mac_address(br, addr->sa_data);
+               br_stp_change_bridge_id(br, addr->sa_data);
+       }
        br->flags |= BR_SET_MAC_ADDR;
        spin_unlock_bh(&br->lock);
 
@@ -186,7 +189,8 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        strcpy(info->bus_info, "N/A");
 }
 
-static u32 br_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t br_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct net_bridge *br = netdev_priv(dev);
 
@@ -341,10 +345,10 @@ void br_dev_setup(struct net_device *dev)
        dev->priv_flags = IFF_EBRIDGE;
 
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                       NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+                       NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
                        NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
        dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                          NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX;
 
        br->dev = dev;
index c8e7861b88b02983e0d5ff202de4e6165bb022b8..f963f6b1884fd96dc12a96743a2f04669194cb3a 100644 (file)
@@ -28,7 +28,8 @@
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr);
-static void fdb_notify(const struct net_bridge_fdb_entry *, int);
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *, int);
 
 static u32 fdb_salt __read_mostly;
 
@@ -80,10 +81,10 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
-static inline void fdb_delete(struct net_bridge_fdb_entry *f)
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
-       fdb_notify(f, RTM_DELNEIGH);
        hlist_del_rcu(&f->hlist);
+       fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
 }
 
@@ -114,7 +115,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
                                }
 
                                /* delete old one */
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                                goto insert;
                        }
                }
@@ -126,6 +127,18 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
        spin_unlock_bh(&br->hash_lock);
 }
 
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+       struct net_bridge_fdb_entry *f;
+
+       /* If old entry was unassociated with any port, then delete it. */
+       f = __br_fdb_get(br, br->dev->dev_addr);
+       if (f && f->is_local && !f->dst)
+               fdb_delete(br, f);
+
+       fdb_insert(br, NULL, newaddr);
+}
+
 void br_fdb_cleanup(unsigned long _data)
 {
        struct net_bridge *br = (struct net_bridge *)_data;
@@ -144,7 +157,7 @@ void br_fdb_cleanup(unsigned long _data)
                                continue;
                        this_timer = f->updated + delay;
                        if (time_before_eq(this_timer, jiffies))
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                        else if (time_before(this_timer, next_timer))
                                next_timer = this_timer;
                }
@@ -165,7 +178,7 @@ void br_fdb_flush(struct net_bridge *br)
                struct hlist_node *h, *n;
                hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
                        if (!f->is_static)
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                }
        }
        spin_unlock_bh(&br->hash_lock);
@@ -209,7 +222,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
                                }
                        }
 
-                       fdb_delete(f);
+                       fdb_delete(br, f);
                skip_delete: ;
                }
        }
@@ -234,7 +247,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
        return NULL;
 }
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 /* Interface used by ATM LANE hook to test
  * if an addr is on some other bridge port */
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
@@ -249,7 +262,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
                ret = 0;
        else {
                fdb = __br_fdb_get(port->br, addr);
-               ret = fdb && fdb->dst->dev != dev &&
+               ret = fdb && fdb->dst && fdb->dst->dev != dev &&
                        fdb->dst->state == BR_STATE_FORWARDING;
        }
        rcu_read_unlock();
@@ -281,6 +294,10 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
                        if (has_expired(br, f))
                                continue;
 
+                       /* ignore pseudo entry for local MAC address */
+                       if (!f->dst)
+                               continue;
+
                        if (skip) {
                                --skip;
                                continue;
@@ -347,7 +364,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                fdb->is_static = 0;
                fdb->updated = fdb->used = jiffies;
                hlist_add_head_rcu(&fdb->hlist, head);
-               fdb_notify(fdb, RTM_NEWNEIGH);
        }
        return fdb;
 }
@@ -371,7 +387,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                br_warn(br, "adding interface %s with same address "
                       "as a received packet\n",
                       source->dev->name);
-               fdb_delete(fdb);
+               fdb_delete(br, fdb);
        }
 
        fdb = fdb_create(head, source, addr);
@@ -379,6 +395,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
+       fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
 
@@ -424,9 +441,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                }
        } else {
                spin_lock(&br->hash_lock);
-               if (likely(!fdb_find(head, addr)))
-                       fdb_create(head, source, addr);
-
+               if (likely(!fdb_find(head, addr))) {
+                       fdb = fdb_create(head, source, addr);
+                       if (fdb)
+                               fdb_notify(br, fdb, RTM_NEWNEIGH);
+               }
                /* else  we lose race and someone else inserts
                 * it first, don't bother updating
                 */
@@ -446,7 +465,7 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
                return NUD_REACHABLE;
 }
 
-static int fdb_fill_info(struct sk_buff *skb,
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                         const struct net_bridge_fdb_entry *fdb,
                         u32 pid, u32 seq, int type, unsigned int flags)
 {
@@ -459,14 +478,13 @@ static int fdb_fill_info(struct sk_buff *skb,
        if (nlh == NULL)
                return -EMSGSIZE;
 
-
        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = 0;
        ndm->ndm_type    = 0;
-       ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+       ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(fdb);
 
        NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
@@ -491,9 +509,10 @@ static inline size_t fdb_nlmsg_size(void)
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *fdb, int type)
 {
-       struct net *net = dev_net(fdb->dst->dev);
+       struct net *net = dev_net(br->dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
 
@@ -501,7 +520,7 @@ static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
        if (skb == NULL)
                goto errout;
 
-       err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+       err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -538,7 +557,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                if (idx < cb->args[0])
                                        goto skip;
 
-                               if (fdb_fill_info(skb, f,
+                               if (fdb_fill_info(skb, br, f,
                                                  NETLINK_CB(cb->skb).pid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_NEWNEIGH,
@@ -556,7 +575,7 @@ skip:
        return skb->len;
 }
 
-/* Create new static fdb entry */
+/* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                         __u16 state, __u16 flags)
 {
@@ -572,19 +591,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                fdb = fdb_create(head, source, addr);
                if (!fdb)
                        return -ENOMEM;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        } else {
                if (flags & NLM_F_EXCL)
                        return -EEXIST;
+       }
+
+       if (fdb_to_nud(fdb) != state) {
+               if (state & NUD_PERMANENT)
+                       fdb->is_local = fdb->is_static = 1;
+               else if (state & NUD_NOARP) {
+                       fdb->is_local = 0;
+                       fdb->is_static = 1;
+               } else
+                       fdb->is_local = fdb->is_static = 0;
 
-               if (flags & NLM_F_REPLACE)
-                       fdb->updated = fdb->used = jiffies;
-               fdb->is_local = fdb->is_static = 0;
+               fdb->updated = fdb->used = jiffies;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        }
 
-       if (state & NUD_PERMANENT)
-               fdb->is_local = fdb->is_static = 1;
-       else if (state & NUD_NOARP)
-               fdb->is_static = 1;
        return 0;
 }
 
@@ -627,6 +652,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                return -EINVAL;
        }
 
+       if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+               pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+               return -EINVAL;
+       }
+
        p = br_port_get_rtnl(dev);
        if (p == NULL) {
                pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -634,9 +664,15 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                return -EINVAL;
        }
 
-       spin_lock_bh(&p->br->hash_lock);
-       err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
-       spin_unlock_bh(&p->br->hash_lock);
+       if (ndm->ndm_flags & NTF_USE) {
+               rcu_read_lock();
+               br_fdb_update(p->br, p, addr);
+               rcu_read_unlock();
+       } else {
+               spin_lock_bh(&p->br->hash_lock);
+               err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+               spin_unlock_bh(&p->br->hash_lock);
+       }
 
        return err;
 }
@@ -651,7 +687,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
        if (!fdb)
                return -ENOENT;
 
-       fdb_delete(fdb);
+       fdb_delete(p->br, fdb);
        return 0;
 }
 
index ee64287f1290d5a1f35ca9f7b64aeb4bac21eedf..61f65344e711fc4b87856889c8d83e89d0d911d6 100644 (file)
@@ -98,7 +98,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-       if (should_deliver(to, skb)) {
+       if (to && should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }
index f603e5b0b9309cf24df5af863d40431ad6ee201c..0a942fbccc9a64592d486199608e6527ebc8de8f 100644 (file)
@@ -296,10 +296,11 @@ int br_min_mtu(const struct net_bridge *br)
 /*
  * Recomputes features using slave's features
  */
-u32 br_features_recompute(struct net_bridge *br, u32 features)
+netdev_features_t br_features_recompute(struct net_bridge *br,
+       netdev_features_t features)
 {
        struct net_bridge_port *p;
-       u32 mask;
+       netdev_features_t mask;
 
        if (list_empty(&br->port_list))
                return features;
index 995cbe0ac0b2b1e74f2b4762d61a13102d9f66bb..568d5bf175341b4f84ef284173b5d6da623ceafb 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/mld.h>
 #include <net/addrconf.h>
@@ -36,7 +36,7 @@
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
        if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
@@ -52,7 +52,7 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
        switch (a->proto) {
        case htons(ETH_P_IP):
                return a->u.ip4 == b->u.ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
 #endif
@@ -65,7 +65,7 @@ static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
        return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
                                const struct in6_addr *ip)
 {
@@ -79,7 +79,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
        switch (ip->proto) {
        case htons(ETH_P_IP):
                return __br_ip4_hash(mdb, ip->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return __br_ip6_hash(mdb, &ip->u.ip6);
 #endif
@@ -121,13 +121,13 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
        return br_mdb_ip_get(mdb, &br_dst);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
        struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
 {
        struct br_ip br_dst;
 
-       ipv6_addr_copy(&br_dst.u.ip6, dst);
+       br_dst.u.ip6 = *dst;
        br_dst.proto = htons(ETH_P_IPV6);
 
        return br_mdb_ip_get(mdb, &br_dst);
@@ -152,9 +152,9 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
        case htons(ETH_P_IP):
                ip.u.ip4 = ip_hdr(skb)->daddr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
-               ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+               ip.u.ip6 = ipv6_hdr(skb)->daddr;
                break;
 #endif
        default:
@@ -411,7 +411,7 @@ out:
        return skb;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    const struct in6_addr *group)
 {
@@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        mldq->mld_cksum = 0;
        mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
        mldq->mld_reserved = 0;
-       ipv6_addr_copy(&mldq->mld_mca, group);
+       mldq->mld_mca = *group;
 
        /* checksum */
        mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -496,7 +496,7 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
        switch (addr->proto) {
        case htons(ETH_P_IP):
                return br_ip4_multicast_alloc_query(br, addr->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
 #endif
@@ -773,7 +773,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        return br_multicast_add_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      const struct in6_addr *group)
@@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
        if (!ipv6_is_transient_multicast(group))
                return 0;
 
-       ipv6_addr_copy(&br_group.u.ip6, group);
+       br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
 
        return br_multicast_add_group(br, port, &br_group);
@@ -845,7 +845,7 @@ static void br_multicast_send_query(struct net_bridge *br,
        br_group.proto = htons(ETH_P_IP);
        __br_multicast_send_query(br, port, &br_group);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        br_group.proto = htons(ETH_P_IPV6);
        __br_multicast_send_query(br, port, &br_group);
 #endif
@@ -989,7 +989,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                                        struct net_bridge_port *port,
                                        struct sk_buff *skb)
@@ -1185,7 +1185,7 @@ out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_query(struct net_bridge *br,
                                  struct net_bridge_port *port,
                                  struct sk_buff *skb)
@@ -1334,7 +1334,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
        br_multicast_leave_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group)
@@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
        if (!ipv6_is_transient_multicast(group))
                return;
 
-       ipv6_addr_copy(&br_group.u.ip6, group);
+       br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
 
        br_multicast_leave_group(br, port, &br_group);
@@ -1449,7 +1449,7 @@ err_out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb)
@@ -1458,6 +1458,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
        const struct ipv6hdr *ip6h;
        u8 icmp6_type;
        u8 nexthdr;
+       __be16 frag_off;
        unsigned len;
        int offset;
        int err;
@@ -1483,7 +1484,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                return -EINVAL;
 
        nexthdr = ip6h->nexthdr;
-       offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+       offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
 
        if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
                return 0;
@@ -1501,6 +1502,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
        __skb_pull(skb2, offset);
        skb_reset_transport_header(skb2);
+       skb_postpull_rcsum(skb2, skb_network_header(skb2),
+                          skb_network_header_len(skb2));
 
        icmp6_type = icmp6_hdr(skb2)->icmp6_type;
 
@@ -1593,7 +1596,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return br_multicast_ipv4_rcv(br, port, skb);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_multicast_ipv6_rcv(br, port, skb);
 #endif
@@ -1770,7 +1773,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        int err = 0;
        struct net_bridge_mdb_htable *mdb;
 
-       spin_lock(&br->multicast_lock);
+       spin_lock_bh(&br->multicast_lock);
        if (br->multicast_disabled == !val)
                goto unlock;
 
@@ -1806,7 +1809,7 @@ rollback:
        }
 
 unlock:
-       spin_unlock(&br->multicast_lock);
+       spin_unlock_bh(&br->multicast_lock);
 
        return err;
 }
index d6ec3720c77e448c4fd014f78397dfe5fb52e592..84122472656c3a70f0c0a93020768e81ecb3972a 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
 
@@ -356,7 +362,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh->hh.hh_len) {
                neigh_hh_bridge(&neigh->hh, skb);
                skb->dev = nf_bridge->physindev;
@@ -807,7 +813,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
        return NF_STOLEN;
 }
 
-#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
index e5f9ece3c9a0f9637c8ad98382b88705b27e1d30..a1daf8227ed11c1a1853a8fb5a246919e9deb5f5 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/sock.h>
 
 #include "br_private.h"
+#include "br_private_stp.h"
 
 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 
        p->state = new_state;
        br_log_state(p);
+
+       spin_lock_bh(&p->br->lock);
+       br_port_state_selection(p->br);
+       spin_unlock_bh(&p->br->lock);
+
        br_ifinfo_notify(RTM_NEWLINK, p);
 
        return 0;
index d7d6fb05411f28715615375982cb51fcbd452dd2..0b67a63ad7a870e550c9d23d03ed7eb52ef854bf 100644 (file)
@@ -56,7 +56,7 @@ struct br_ip
 {
        union {
                __be32  ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct in6_addr ip6;
 #endif
        } u;
@@ -348,6 +348,7 @@ extern void br_fdb_fini(void);
 extern void br_fdb_flush(struct net_bridge *br);
 extern void br_fdb_changeaddr(struct net_bridge_port *p,
                              const unsigned char *newaddr);
+extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
                                  const struct net_bridge_port *p, int do_all);
@@ -387,7 +388,8 @@ extern int br_add_if(struct net_bridge *br,
 extern int br_del_if(struct net_bridge *br,
              struct net_device *dev);
 extern int br_min_mtu(const struct net_bridge *br);
-extern u32 br_features_recompute(struct net_bridge *br, u32 features);
+extern netdev_features_t br_features_recompute(struct net_bridge *br,
+       netdev_features_t features);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -535,7 +537,7 @@ extern void br_stp_port_timer_init(struct net_bridge_port *p);
 extern unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
 #endif
 
index ad0a3f7cf6cc73081ca600b241060ce5ef867e44..dd147d78a5889ab6c2139712199c3672f9e087c6 100644 (file)
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
        struct net_bridge_port *p;
        unsigned int liveports = 0;
 
-       /* Don't change port states if userspace is handling STP */
-       if (br->stp_enabled == BR_USER_STP)
-               return;
-
        list_for_each_entry(p, &br->port_list, list) {
                if (p->state == BR_STATE_DISABLED)
                        continue;
 
-               if (p->port_no == br->root_port) {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_forwarding(p);
-               } else if (br_is_designated_port(p)) {
-                       del_timer(&p->message_age_timer);
-                       br_make_forwarding(p);
-               } else {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_blocking(p);
+               /* Don't change port states if userspace is handling STP */
+               if (br->stp_enabled != BR_USER_STP) {
+                       if (p->port_no == br->root_port) {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_forwarding(p);
+                       } else if (br_is_designated_port(p)) {
+                               del_timer(&p->message_age_timer);
+                               br_make_forwarding(p);
+                       } else {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_blocking(p);
+                       }
                }
 
                if (p->state == BR_STATE_FORWARDING)
index 2ed0056a39a88afb27beac4fc0e57c3c00bae347..99c85668f5518ab9ee8d05a7b085139734893fdd 100644 (file)
@@ -55,9 +55,10 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
                return false;
        if (info->bitmask & EBT_IP6_PROTO) {
                uint8_t nexthdr = ih6->nexthdr;
+               __be16 frag_off;
                int offset_ph;
 
-               offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
+               offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
                if (offset_ph == -1)
                        return false;
                if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
index 6e5a8bb9b940ce3eb03e1dc6840f4e84ef4d1f98..f88ee537fb2b811347c109cfa19dbdd34c2923c0 100644 (file)
@@ -107,12 +107,13 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                goto out;
        }
 
-#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
        if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
           htons(ETH_P_IPV6)) {
                const struct ipv6hdr *ih;
                struct ipv6hdr _iph;
                uint8_t nexthdr;
+               __be16 frag_off;
                int offset_ph;
 
                ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
@@ -123,7 +124,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
                       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
                nexthdr = ih->nexthdr;
-               offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
+               offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
                if (offset_ph == -1)
                        goto out;
                print_ports(skb, nexthdr, offset_ph);
index 529750da962442a54491a9a88070cb2b136af590..936361e5a2b66f109156d8ac646f01ee2ed88027 100644 (file)
@@ -40,3 +40,14 @@ config CAIF_NETDEV
        If you select to build it as a built-in then the main CAIF device must
        also be a built-in.
        If unsure say Y.
+
+config CAIF_USB
+       tristate "CAIF USB support"
+       depends on CAIF
+       default n
+       ---help---
+       Say Y if you are using CAIF over USB CDC NCM.
+       This can be either built-in or a loadable module,
+       If you select to build it as a built-in then the main CAIF device must
+       also be a built-in.
+       If unsure say N.
index ebcd4e7e6f47ba55479d41285baa2281fb2dff85..cc2b51154d039156c8b0795c4387a4cebdd2cf8c 100644 (file)
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
 
 export-y := caif.o
index f1fa1f6e658d4dc84ca6eea2abceb6be6d3e8283..b0ce14fbf6ef7fee0f51e4d3e1b4c06637ff3b42 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
 
 MODULE_LICENSE("GPL");
 
@@ -33,6 +35,10 @@ struct caif_device_entry {
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
+       spinlock_t flow_lock;
+       struct sk_buff *xoff_skb;
+       void (*xoff_skb_dtor)(struct sk_buff *skb);
+       bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -47,13 +53,14 @@ struct caif_net {
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (!caifn)
+               return NULL;
        return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -61,9 +68,9 @@ EXPORT_SYMBOL(get_cfcnfg);
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (!caifn)
+               return NULL;
        return &caifn->caifdevs;
 }
 
@@ -92,7 +99,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        struct caif_device_entry *caifd;
 
        caifdevs = caif_device_list(dev_net(dev));
-       BUG_ON(!caifdevs);
+       if (!caifdevs)
+               return NULL;
 
        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
@@ -112,7 +120,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
-       BUG_ON(!caifdevs);
+       if (!caifdevs)
+               return NULL;
+
        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
@@ -120,15 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        return NULL;
 }
 
+void caif_flow_cb(struct sk_buff *skb)
+{
+       struct caif_device_entry *caifd;
+       void (*dtor)(struct sk_buff *skb) = NULL;
+       bool send_xoff;
+
+       WARN_ON(skb->dev == NULL);
+
+       rcu_read_lock();
+       caifd = caif_get(skb->dev);
+       caifd_hold(caifd);
+       rcu_read_unlock();
+
+       spin_lock_bh(&caifd->flow_lock);
+       send_xoff = caifd->xoff;
+       caifd->xoff = 0;
+       if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+               WARN_ON(caifd->xoff_skb != skb);
+               dtor = caifd->xoff_skb_dtor;
+               caifd->xoff_skb = NULL;
+               caifd->xoff_skb_dtor = NULL;
+       }
+       spin_unlock_bh(&caifd->flow_lock);
+
+       if (dtor)
+               dtor(skb);
+
+       if (send_xoff)
+               caifd->layer.up->
+                       ctrlcmd(caifd->layer.up,
+                               _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+                               caifd->layer.id);
+       caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-       int err;
+       int err, high = 0, qlen = 0;
+       struct caif_dev_common *caifdev;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
+       struct netdev_queue *txq;
+
+       rcu_read_lock_bh();
 
        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
+       skb_reset_network_header(skb);
+       skb->protocol = htons(ETH_P_CAIF);
+       caifdev = netdev_priv(caifd->netdev);
+
+       /* Check if we need to handle xoff */
+       if (likely(caifd->netdev->tx_queue_len == 0))
+               goto noxoff;
+
+       if (unlikely(caifd->xoff))
+               goto noxoff;
+
+       if (likely(!netif_queue_stopped(caifd->netdev))) {
+               /* If we run with a TX queue, check if the queue is too long*/
+               txq = netdev_get_tx_queue(skb->dev, 0);
+               qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+               if (likely(qlen == 0))
+                       goto noxoff;
+
+               high = (caifd->netdev->tx_queue_len * q_high) / 100;
+               if (likely(qlen < high))
+                       goto noxoff;
+       }
+
+       /* Hold lock while accessing xoff */
+       spin_lock_bh(&caifd->flow_lock);
+       if (caifd->xoff) {
+               spin_unlock_bh(&caifd->flow_lock);
+               goto noxoff;
+       }
+
+       /*
+        * Handle flow off, we do this by temporary hi-jacking this
+        * skb's destructor function, and replace it with our own
+        * flow-on callback. The callback will set flow-on and call
+        * the original destructor.
+        */
+
+       pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
+                       netif_queue_stopped(caifd->netdev),
+                       qlen, high);
+       caifd->xoff = 1;
+       caifd->xoff_skb = skb;
+       caifd->xoff_skb_dtor = skb->destructor;
+       skb->destructor = caif_flow_cb;
+       spin_unlock_bh(&caifd->flow_lock);
+
+       caifd->layer.up->ctrlcmd(caifd->layer.up,
+                                       _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+                                       caifd->layer.id);
+noxoff:
+       rcu_read_unlock_bh();
 
        err = dev_queue_xmit(skb);
        if (err > 0)
@@ -172,7 +273,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 
        /* Release reference to stack upwards */
        caifd_put(caifd);
-       return 0;
+
+       if (err != 0)
+               err = NET_RX_DROP;
+       return err;
 }
 
 static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@ static void dev_flowctrl(struct net_device *dev, int on)
        caifd_put(caifd);
 }
 
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                       struct cflayer *link_support, int head_room,
+                       struct cflayer **layer, int (**rcv_func)(
+                               struct sk_buff *, struct net_device *,
+                               struct packet_type *, struct net_device *))
+{
+       struct caif_device_entry *caifd;
+       enum cfcnfg_phy_preference pref;
+       struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+       struct caif_device_entry_list *caifdevs;
+
+       caifdevs = caif_device_list(dev_net(dev));
+       if (!cfg || !caifdevs)
+               return;
+       caifd = caif_device_alloc(dev);
+       if (!caifd)
+               return;
+       *layer = &caifd->layer;
+       spin_lock_init(&caifd->flow_lock);
+
+       switch (caifdev->link_select) {
+       case CAIF_LINK_HIGH_BANDW:
+               pref = CFPHYPREF_HIGH_BW;
+               break;
+       case CAIF_LINK_LOW_LATENCY:
+               pref = CFPHYPREF_LOW_LAT;
+               break;
+       default:
+               pref = CFPHYPREF_HIGH_BW;
+               break;
+       }
+       mutex_lock(&caifdevs->lock);
+       list_add_rcu(&caifd->list, &caifdevs->list);
+
+       strncpy(caifd->layer.name, dev->name,
+               sizeof(caifd->layer.name) - 1);
+       caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+       caifd->layer.transmit = transmit;
+       cfcnfg_add_phy_layer(cfg,
+                               dev,
+                               &caifd->layer,
+                               pref,
+                               link_support,
+                               caifdev->use_fcs,
+                               head_room);
+       mutex_unlock(&caifdevs->lock);
+       if (rcv_func)
+               *rcv_func = receive;
+}
+EXPORT_SYMBOL(caif_enroll_dev);
+
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *arg)
@@ -210,62 +365,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        struct net_device *dev = arg;
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
-       enum cfcnfg_phy_preference pref;
-       enum cfcnfg_phy_type phy_type;
        struct cfcnfg *cfg;
+       struct cflayer *layer, *link_support;
+       int head_room = 0;
        struct caif_device_entry_list *caifdevs;
 
-       if (dev->type != ARPHRD_CAIF)
-               return 0;
-
        cfg = get_cfcnfg(dev_net(dev));
-       if (cfg == NULL)
+       caifdevs = caif_device_list(dev_net(dev));
+       if (!cfg || !caifdevs)
                return 0;
 
-       caifdevs = caif_device_list(dev_net(dev));
+       caifd = caif_get(dev);
+       if (caifd == NULL && dev->type != ARPHRD_CAIF)
+               return 0;
 
        switch (what) {
        case NETDEV_REGISTER:
-               caifd = caif_device_alloc(dev);
-               if (!caifd)
-                       return 0;
+               if (caifd != NULL)
+                       break;
 
                caifdev = netdev_priv(dev);
-               caifdev->flowctrl = dev_flowctrl;
 
-               caifd->layer.transmit = transmit;
-
-               if (caifdev->use_frag)
-                       phy_type = CFPHYTYPE_FRAG;
-               else
-                       phy_type = CFPHYTYPE_CAIF;
-
-               switch (caifdev->link_select) {
-               case CAIF_LINK_HIGH_BANDW:
-                       pref = CFPHYPREF_HIGH_BW;
-                       break;
-               case CAIF_LINK_LOW_LATENCY:
-                       pref = CFPHYPREF_LOW_LAT;
-                       break;
-               default:
-                       pref = CFPHYPREF_HIGH_BW;
-                       break;
+               link_support = NULL;
+               if (caifdev->use_frag) {
+                       head_room = 1;
+                       link_support = cfserl_create(dev->ifindex,
+                                                       caifdev->use_stx);
+                       if (!link_support) {
+                               pr_warn("Out of memory\n");
+                               break;
+                       }
                }
-               strncpy(caifd->layer.name, dev->name,
-                       sizeof(caifd->layer.name) - 1);
-               caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
-               mutex_lock(&caifdevs->lock);
-               list_add_rcu(&caifd->list, &caifdevs->list);
-
-               cfcnfg_add_phy_layer(cfg,
-                                    phy_type,
-                                    dev,
-                                    &caifd->layer,
-                                    pref,
-                                    caifdev->use_fcs,
-                                    caifdev->use_stx);
-               mutex_unlock(&caifdevs->lock);
+               caif_enroll_dev(dev, caifdev, link_support, head_room,
+                               &layer, NULL);
+               caifdev->flowctrl = dev_flowctrl;
                break;
 
        case NETDEV_UP:
@@ -277,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                        break;
                }
 
+               caifd->xoff = 0;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();
 
@@ -298,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);
+
+               spin_lock_bh(&caifd->flow_lock);
+
+               /*
+                * Replace our xoff-destructor with original destructor.
+                * We trust that skb->destructor *always* is called before
+                * the skb reference is invalid. The hijacked SKB destructor
+                * takes the flow_lock so manipulating the skb->destructor here
+                * should be safe.
+               */
+               if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+                       caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+               caifd->xoff = 0;
+               caifd->xoff_skb_dtor = NULL;
+               caifd->xoff_skb = NULL;
+
+               spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;
 
@@ -353,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
        struct caif_net *caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (WARN_ON(!caifn))
+               return -EINVAL;
+
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);
 
        caifn->cfg = cfcnfg_create();
-       if (!caifn->cfg) {
-               pr_warn("can't create cfcnfg\n");
+       if (!caifn->cfg)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -371,17 +523,14 @@ static void caif_exit_net(struct net *net)
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
            caif_device_list(net);
-       struct cfcnfg *cfg;
+       struct cfcnfg *cfg =  get_cfcnfg(net);
+
+       if (!cfg || !caifdevs)
+               return;
 
        rtnl_lock();
        mutex_lock(&caifdevs->lock);
 
-       cfg = get_cfcnfg(net);
-       if (cfg == NULL) {
-               mutex_unlock(&caifdevs->lock);
-               return;
-       }
-
        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644 (file)
index 0000000..5fc9eca
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1   /* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4      /* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc     /* USB Product ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306        /* Product id for CAIF Modems */
+
+struct cfusbl {
+       struct cflayer layer;
+       u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+/*
+ * Receive path: strip the one-byte pad-length descriptor and the
+ * padding bytes it announces, then pass the remaining CAIF frame to
+ * the layer above.
+ */
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+       u8 hpad;
+
+       /* Remove padding. First byte holds the pad length; a NULL data
+        * pointer to cfpkt_extr_head() discards the padding itself. */
+       cfpkt_extr_head(pkt, &hpad, 1);
+       cfpkt_extr_head(pkt, NULL, hpad);
+       return layr->up->receive(layr->up, pkt);
+}
+
+/*
+ * Transmit path: prepend the CAIF-over-USB framing to an outgoing
+ * packet - the precomputed ethernet header, a one-byte pad-length
+ * descriptor, and zero padding so the CAIF protocol headers end up
+ * CFUSB_ALIGNMENT-aligned.  The skb is consumed (freed) on error.
+ */
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+       struct caif_payload_info *info;
+       u8 hpad;
+       u8 zeros[CFUSB_ALIGNMENT];
+       struct sk_buff *skb;
+       struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+       skb = cfpkt_tonative(pkt);
+
+       skb_reset_network_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+
+       /* Pad length depends on the upper layers' header length so the
+        * payload after descriptor + headers lands on the alignment. */
+       info = cfpkt_info(pkt);
+       hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+       if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+               pr_warn("Headroom too small\n");
+               kfree_skb(skb);
+               return -EIO;
+       }
+       memset(zeros, 0, hpad);
+
+       cfpkt_add_head(pkt, zeros, hpad);
+       cfpkt_add_head(pkt, &hpad, 1);
+       cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+       return layr->dn->transmit(layr->dn, pkt);
+}
+
+/* Forward flow-control / link-state commands to the layer above,
+ * substituting this layer's own id for the reported phy id. */
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+                                       int phyid)
+{
+       if (layr->up && layr->up->ctrlcmd)
+               layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+/*
+ * Allocate and initialise the CAIF USB link-support layer for @phyid.
+ * A TX ethernet header (destination @braddr, source @ethaddr, type
+ * ETH_P_802_EX1) is precomputed once here and prepended to every
+ * frame by cfusbl_transmit().  Returns NULL on allocation failure.
+ */
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+                                       u8 braddr[ETH_ALEN])
+{
+       struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+       if (!this) {
+               pr_warn("Out of memory\n");
+               return NULL;
+       }
+       caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+       /* Zero the whole object, not just the embedded cflayer, so the
+        * ethernet header template starts fully initialised. */
+       memset(this, 0, sizeof(struct cfusbl));
+       this->layer.receive = cfusbl_receive;
+       this->layer.transmit = cfusbl_transmit;
+       this->layer.ctrlcmd = cfusbl_ctrlcmd;
+       snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+       this->layer.id = phyid;
+
+       /*
+        * Construct TX ethernet header:
+        *      0-5     destination address
+        *      6-11    source address
+        *      12-13   protocol type
+        */
+       memcpy(this->tx_eth_hdr, braddr, ETH_ALEN);
+       memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+       /* Write the type field in network byte order explicitly;
+        * masking the result of cpu_to_be16() picks the wrong byte on
+        * big-endian hosts. */
+       this->tx_eth_hdr[12] = (ETH_P_802_EX1 >> 8) & 0xff;
+       this->tx_eth_hdr[13] = ETH_P_802_EX1 & 0xff;
+       pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+                       this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+                       this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+       return (struct cflayer *) this;
+}
+
+/* Packet type matching CAIF-over-NCM frames (ETH_P_802_EX1).  The
+ * address of .func is handed to caif_enroll_dev() below, which
+ * installs the receive handler before the type is registered. */
+static struct packet_type caif_usb_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+/*
+ * Netdevice notifier: attach CAIF to ST-Ericsson CDC NCM USB devices.
+ * On NETDEV_REGISTER a cfusbl link-support layer is created and the
+ * device is enrolled with the CAIF stack; on NETDEV_UNREGISTER the
+ * module reference taken at registration is dropped.  All other
+ * devices and events are ignored.
+ */
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+                             void *arg)
+{
+       struct net_device *dev = arg;
+       struct caif_dev_common common;
+       struct cflayer *layer, *link_support;
+       struct usbnet   *usbnet = netdev_priv(dev);
+       struct usb_device       *usbdev = usbnet->udev;
+       struct ethtool_drvinfo drvinfo;
+
+       /*
+        * Quirks: Hijack ethtool to find if we have a NCM device,
+        * and find its VID/PID.
+        */
+       if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+               return 0;
+
+       dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+       if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+               return 0;
+
+       pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
+               le16_to_cpu(usbdev->descriptor.idVendor),
+               le16_to_cpu(usbdev->descriptor.idProduct));
+
+       /* Check for VID/PID that supports CAIF */
+       if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+               le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+               return 0;
+
+       /* Balance the __module_get() taken when this device registered. */
+       if (what == NETDEV_UNREGISTER)
+               module_put(THIS_MODULE);
+
+       if (what != NETDEV_REGISTER)
+               return 0;
+
+       /* Pin the module while the device is enrolled with CAIF. */
+       __module_get(THIS_MODULE);
+
+       /* USB/NCM gives a reliable, unframed pipe: no fragmentation,
+        * checksum or start-of-frame markers needed. */
+       memset(&common, 0, sizeof(common));
+       common.use_frag = false;
+       common.use_fcs = false;
+       common.use_stx = false;
+       common.link_select = CAIF_LINK_HIGH_BANDW;
+       common.flowctrl = NULL;
+
+       link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+                                       dev->broadcast);
+
+       if (!link_support)
+               return -ENOMEM;
+
+       if (dev->num_tx_queues > 1)
+               pr_warn("USB device uses more than one tx queue\n");
+
+       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+                       &layer, &caif_usb_type.func);
+       /* Register the packet handler once, on first CAIF device. */
+       if (!pack_added)
+               dev_add_pack(&caif_usb_type);
+       pack_added = true;
+
+       /* Bounded copy with explicit NUL termination. */
+       strncpy(layer->name, dev->name,
+                       sizeof(layer->name) - 1);
+       layer->name[sizeof(layer->name) - 1] = 0;
+
+       return 0;
+}
+
+/* Netdevice notifier block; priority 0 runs with default ordering. */
+static struct notifier_block caif_device_notifier = {
+       .notifier_call = cfusbl_device_notify,
+       .priority = 0,
+};
+
+/* Module init: watch netdevice events so CAIF-capable NCM devices
+ * are enrolled as they appear. */
+static int __init cfusbl_init(void)
+{
+       return register_netdevice_notifier(&caif_device_notifier);
+}
+
+/* Module exit: unregister the notifier and, only if it was ever
+ * added, the CAIF packet type.  Calling dev_remove_pack() on a
+ * packet type that was never added triggers a "not found" warning. */
+static void __exit cfusbl_exit(void)
+{
+       unregister_netdevice_notifier(&caif_device_notifier);
+       if (pack_added)
+               dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
index 00523ecc4ced75ebaa1059c1eb3e0a324cae182e..598aafb4cb5169e799148ffd6b33d351b702be7d 100644 (file)
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
        /* Interface index */
        int ifindex;
 
-       /* Use Start of frame extension */
-       bool use_stx;
+       /* Protocol head room added for CAIF link layer */
+       int head_room;
 
        /* Use Start of frame checksum */
        bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
        if (channel_id != 0) {
                struct cflayer *servl;
                servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+               cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
                if (servl != NULL)
                        layer_set_up(servl, NULL);
        } else
                pr_debug("nothing to disconnect\n");
-       cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
        /* Do RCU sync before initiating cleanup */
        synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 
        *ifindex = phy->ifindex;
        *proto_tail = 2;
-       *proto_head =
-
-       protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+       *proto_head = protohead[param.linktype] + phy->head_room;
 
        rcu_read_unlock();
 
@@ -460,13 +458,13 @@ unlock:
 }
 
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
-                    bool fcs, bool stx)
+                    struct cflayer *link_support,
+                    bool fcs, int head_room)
 {
        struct cflayer *frml;
-       struct cflayer *phy_driver = NULL;
        struct cfcnfg_phyinfo *phyinfo = NULL;
        int i;
        u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
                        goto got_phyid;
        }
        pr_warn("Too many CAIF Link Layers (max 6)\n");
-       goto out_err;
+       goto out;
 
 got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
        if (!phyinfo)
                goto out_err;
 
-       switch (phy_type) {
-       case CFPHYTYPE_FRAG:
-               phy_driver =
-                   cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
-               if (!phy_driver)
-                       goto out_err;
-               break;
-       case CFPHYTYPE_CAIF:
-               phy_driver = NULL;
-               break;
-       default:
-               goto out_err;
-       }
        phy_layer->id = phyid;
        phyinfo->pref = pref;
        phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
        phyinfo->dev_info.dev = dev;
        phyinfo->phy_layer = phy_layer;
        phyinfo->ifindex = dev->ifindex;
-       phyinfo->use_stx = stx;
+       phyinfo->head_room = head_room;
        phyinfo->use_fcs = fcs;
 
        frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
        phyinfo->frm_layer = frml;
        layer_set_up(frml, cnfg->mux);
 
-       if (phy_driver != NULL) {
-               phy_driver->id = phyid;
-               layer_set_dn(frml, phy_driver);
-               layer_set_up(phy_driver, frml);
-               layer_set_dn(phy_driver, phy_layer);
-               layer_set_up(phy_layer, phy_driver);
+       if (link_support != NULL) {
+               link_support->id = phyid;
+               layer_set_dn(frml, link_support);
+               layer_set_up(link_support, frml);
+               layer_set_dn(link_support, phy_layer);
+               layer_set_up(phy_layer, link_support);
        } else {
                layer_set_dn(frml, phy_layer);
                layer_set_up(phy_layer, frml);
        }
 
        list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
        mutex_unlock(&cnfg->lock);
        return;
 
 out_err:
-       kfree(phy_driver);
        kfree(phyinfo);
        mutex_unlock(&cnfg->lock);
 }
index f39921171d0d94e51e1270891a6703be598d01e9..d3ca87bf23b7ff952a428b367f6ad525382e6740 100644 (file)
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-       int tmp;
        u16 chks;
        u16 len;
+       __le16 data;
+
        struct cffrml *this = container_obj(layr);
        if (this->dofcs) {
                chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-               tmp = cpu_to_le16(chks);
-               cfpkt_add_trail(pkt, &tmp, 2);
+               data = cpu_to_le16(chks);
+               cfpkt_add_trail(pkt, &data, 2);
        } else {
                cfpkt_pad_trail(pkt, 2);
        }
        len = cfpkt_getlen(pkt);
-       tmp = cpu_to_le16(len);
-       cfpkt_add_head(pkt, &tmp, 2);
+       data = cpu_to_le16(len);
+       cfpkt_add_head(pkt, &data, 2);
        cfpkt_info(pkt)->hdr_len += 2;
        if (cfpkt_erroneous(pkt)) {
                pr_err("Packet is erroneous!\n");
index df08c47183d477b96bf26cb026043f17c1abcf64..e335ba859b970a24d6dc785451c9b9bb8b87e2ba 100644 (file)
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
        return (struct cfpkt *) skb;
 }
 
-
 struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
 {
        struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
        kfree_skb(skb);
 }
 
-
 inline bool cfpkt_more(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len > 0;
 }
 
-
 int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
        }
        from = skb_pull(skb, len);
        from -= len;
-       memcpy(data, from, len);
+       if (data)
+               memcpy(data, from, len);
        return 0;
 }
+EXPORT_SYMBOL(cfpkt_extr_head);
 
 int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 {
@@ -170,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
        return 0;
 }
 
-
 int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
 {
        return cfpkt_add_body(pkt, NULL, len);
 }
 
-
 int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
        memcpy(to, data, len);
        return 0;
 }
-
+EXPORT_SYMBOL(cfpkt_add_head);
 
 inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
 {
        return cfpkt_add_body(pkt, data, len);
 }
 
-
 inline u16 cfpkt_getlen(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len;
 }
 
-
 inline u16 cfpkt_iterate(struct cfpkt *pkt,
                            u16 (*iter_func)(u16, void *, u16),
                            u16 data)
@@ -287,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
        return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
 }
 
-
 int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
 {
        return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
+EXPORT_SYMBOL(cfpkt_info);
index 81660f8097131f2c7cb952e7a1f6d51ba2ce3a40..6dc75d4f8d942a183123b8720fdfe42c1e42888d 100644 (file)
@@ -190,7 +190,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
 
        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
index 797c8d165993f556b954b6c7aea9fd31540adbe0..8e68b97f13ee71c23186fac06573590ae3664f78 100644 (file)
@@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                                int phyid);
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+struct cflayer *cfserl_create(int instance, bool use_stx)
 {
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
        if (!this)
@@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
        this->layer.receive = cfserl_receive;
        this->layer.transmit = cfserl_transmit;
        this->layer.ctrlcmd = cfserl_ctrlcmd;
-       this->layer.type = type;
        this->usestx = use_stx;
        spin_lock_init(&this->sync);
        snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
index 42599e31dcad8a6ceb1f3b29d9171f6a249e56a3..3a94eae7abe924d8d73d454b5a9330ab50bda968 100644 (file)
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
        int i, j;
        int numrep;
        int firstn;
-       int rc = -1;
 
        BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
         * that this may or may not correspond to the specific types
         * referenced by the crush rule.
         */
-       if (force >= 0) {
-               if (force >= map->max_devices ||
-                   map->device_parents[force] == 0) {
-                       /*dprintk("CRUSH: forcefed device dne\n");*/
-                       rc = -1;  /* force fed device dne */
-                       goto out;
-               }
-               if (!is_out(map, weight, force, x)) {
-                       while (1) {
-                               force_context[++force_pos] = force;
-                               if (force >= 0)
-                                       force = map->device_parents[force];
-                               else
-                                       force = map->bucket_parents[-1-force];
-                               if (force == 0)
-                                       break;
-                       }
+       if (force >= 0 &&
+           force < map->max_devices &&
+           map->device_parents[force] != 0 &&
+           !is_out(map, weight, force, x)) {
+               while (1) {
+                       force_context[++force_pos] = force;
+                       if (force >= 0)
+                               force = map->device_parents[force];
+                       else
+                               force = map->bucket_parents[-1-force];
+                       if (force == 0)
+                               break;
                }
        }
 
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
                        BUG_ON(1);
                }
        }
-       rc = result_len;
-
-out:
-       return rc;
+       return result_len;
 }
 
 
index 733e46008b89d6e67397d779edf9ccb5cb3e5c8a..f4f3f58f5234c1a5ce5eb4b54638c730f571ffb9 100644 (file)
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                ceph_pagelist_init(req->r_trail);
        }
        /* create request message; allow space for oid */
-       msg_size += 40;
+       msg_size += MAX_OBJ_NAME_SIZE;
        if (snapc)
                msg_size += sizeof(u64) * snapc->num_snaps;
        if (use_mempool)
index 0d357b1c4e57db42d6a1938c0a7a870455fa7b1b..674641b13aea341257094fa4f070bd4289075d9b 100644 (file)
@@ -3,12 +3,13 @@
 #
 
 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
-        gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
+        gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
-                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+                       sock_diag.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
@@ -19,3 +20,4 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
index 6ba50a1e404c4bac04cc7d56718865d5a1749c2f..f494675471a91b7f093665e6098ee7b3a91e6e4e 100644 (file)
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/if_tunnel.h>
-#include <linux/if_pppox.h>
-#include <linux/ppp_defs.h>
 #include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
+#include <net/flow_keys.h>
 
 #include "net-sysfs.h"
 
@@ -1320,8 +1319,6 @@ EXPORT_SYMBOL(dev_close);
  */
 void dev_disable_lro(struct net_device *dev)
 {
-       u32 flags;
-
        /*
         * If we're trying to disable lro on a vlan device
         * use the underlying physical device instead
@@ -1329,15 +1326,9 @@ void dev_disable_lro(struct net_device *dev)
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
 
-       if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
-               flags = dev->ethtool_ops->get_flags(dev);
-       else
-               flags = ethtool_op_get_flags(dev);
+       dev->wanted_features &= ~NETIF_F_LRO;
+       netdev_update_features(dev);
 
-       if (!(flags & ETH_FLAG_LRO))
-               return;
-
-       __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");
 }
@@ -1396,7 +1387,7 @@ rollback:
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
-                               break;
+                               goto outroll;
 
                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1398,7 @@ rollback:
                }
        }
 
+outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
 }
@@ -1449,34 +1441,55 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
+#ifdef HAVE_JUMP_LABEL
+/* We are not allowed to call jump_label_dec() from irq context
+ * If net_disable_timestamp() is called from irq context, defer the
+ * jump_label_dec() calls.
+ */
+static atomic_t netstamp_needed_deferred;
+#endif
 
 void net_enable_timestamp(void)
 {
-       atomic_inc(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+       if (deferred) {
+               while (--deferred)
+                       jump_label_dec(&netstamp_needed);
+               return;
+       }
+#endif
+       WARN_ON(in_interrupt());
+       jump_label_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
-       atomic_dec(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       if (in_interrupt()) {
+               atomic_inc(&netstamp_needed_deferred);
+               return;
+       }
+#endif
+       jump_label_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-       if (atomic_read(&netstamp_needed))
+       skb->tstamp.tv64 = 0;
+       if (static_branch(&netstamp_needed))
                __net_timestamp(skb);
-       else
-               skb->tstamp.tv64 = 0;
 }
 
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
-       if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
-               __net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB)                 \
+       if (static_branch(&netstamp_needed)) {          \
+               if ((COND) && !(SKB)->tstamp.tv64)      \
+                       __net_timestamp(SKB);           \
+       }                                               \
 
 static int net_hwtstamp_validate(struct ifreq *ifr)
 {
@@ -1923,7 +1936,8 @@ EXPORT_SYMBOL(skb_checksum_help);
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@ -1953,9 +1967,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
 
-               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-                    info.driver, dev ? dev->features : 0L,
-                    skb->sk ? skb->sk->sk_route_caps : 0L,
+               WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+                    info.driver, dev ? &dev->features : NULL,
+                    skb->sk ? &skb->sk->sk_route_caps : NULL,
                     skb->len, skb->data_len, skb->ip_summed);
 
                if (skb_header_cloned(skb) &&
@@ -2064,7 +2078,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
  *     This function segments the given skb and stores the list of segments
  *     in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
        struct sk_buff *segs;
 
@@ -2103,7 +2117,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
        }
 }
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
@@ -2114,7 +2128,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
                 protocol == htons(ETH_P_FCOE)));
 }
 
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+       __be16 protocol, netdev_features_t features)
 {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
@@ -2126,10 +2141,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
        return features;
 }
 
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       u32 features = skb->dev->features;
+       netdev_features_t features = skb->dev->features;
 
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2175,7 +2190,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int skb_len;
 
        if (likely(!skb->next)) {
-               u32 features;
+               netdev_features_t features;
 
                /*
                 * If device doesn't need skb->dst, release it right now while
@@ -2256,7 +2271,7 @@ gso:
                        return rc;
                }
                txq_trans_update(txq);
-               if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+               if (unlikely(netif_xmit_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
 
@@ -2456,6 +2471,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        return rc;
 }
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+       struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+
+       if ((!skb->priority) && (skb->sk) && map)
+               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
@@ -2496,6 +2523,8 @@ int dev_queue_xmit(struct sk_buff *skb)
         */
        rcu_read_lock_bh();
 
+       skb_update_prio(skb);
+
        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -2530,7 +2559,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                        HARD_TX_LOCK(dev, txq, cpu);
 
-                       if (!netif_tx_queue_stopped(txq)) {
+                       if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
@@ -2591,123 +2620,28 @@ static inline void ____napi_schedule(struct softnet_data *sd,
  */
 void __skb_get_rxhash(struct sk_buff *skb)
 {
-       int nhoff, hash = 0, poff;
-       const struct ipv6hdr *ip6;
-       const struct iphdr *ip;
-       const struct vlan_hdr *vlan;
-       u8 ip_proto;
-       u32 addr1, addr2;
-       u16 proto;
-       union {
-               u32 v32;
-               u16 v16[2];
-       } ports;
-
-       nhoff = skb_network_offset(skb);
-       proto = skb->protocol;
-
-again:
-       switch (proto) {
-       case __constant_htons(ETH_P_IP):
-ip:
-               if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
-                       goto done;
-
-               ip = (const struct iphdr *) (skb->data + nhoff);
-               if (ip_is_fragment(ip))
-                       ip_proto = 0;
-               else
-                       ip_proto = ip->protocol;
-               addr1 = (__force u32) ip->saddr;
-               addr2 = (__force u32) ip->daddr;
-               nhoff += ip->ihl * 4;
-               break;
-       case __constant_htons(ETH_P_IPV6):
-ipv6:
-               if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
-                       goto done;
-
-               ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
-               ip_proto = ip6->nexthdr;
-               addr1 = (__force u32) ip6->saddr.s6_addr32[3];
-               addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-               nhoff += 40;
-               break;
-       case __constant_htons(ETH_P_8021Q):
-               if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
-                       goto done;
-               vlan = (const struct vlan_hdr *) (skb->data + nhoff);
-               proto = vlan->h_vlan_encapsulated_proto;
-               nhoff += sizeof(*vlan);
-               goto again;
-       case __constant_htons(ETH_P_PPP_SES):
-               if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
-                       goto done;
-               proto = *((__be16 *) (skb->data + nhoff +
-                                     sizeof(struct pppoe_hdr)));
-               nhoff += PPPOE_SES_HLEN;
-               switch (proto) {
-               case __constant_htons(PPP_IP):
-                       goto ip;
-               case __constant_htons(PPP_IPV6):
-                       goto ipv6;
-               default:
-                       goto done;
-               }
-       default:
-               goto done;
-       }
-
-       switch (ip_proto) {
-       case IPPROTO_GRE:
-               if (pskb_may_pull(skb, nhoff + 16)) {
-                       u8 *h = skb->data + nhoff;
-                       __be16 flags = *(__be16 *)h;
+       struct flow_keys keys;
+       u32 hash;
 
-                       /*
-                        * Only look inside GRE if version zero and no
-                        * routing
-                        */
-                       if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
-                               proto = *(__be16 *)(h + 2);
-                               nhoff += 4;
-                               if (flags & GRE_CSUM)
-                                       nhoff += 4;
-                               if (flags & GRE_KEY)
-                                       nhoff += 4;
-                               if (flags & GRE_SEQ)
-                                       nhoff += 4;
-                               goto again;
-                       }
-               }
-               break;
-       case IPPROTO_IPIP:
-               goto again;
-       default:
-               break;
-       }
+       if (!skb_flow_dissect(skb, &keys))
+               return;
 
-       ports.v32 = 0;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               nhoff += poff;
-               if (pskb_may_pull(skb, nhoff + 4)) {
-                       ports.v32 = * (__force u32 *) (skb->data + nhoff);
-                       if (ports.v16[1] < ports.v16[0])
-                               swap(ports.v16[0], ports.v16[1]);
-                       skb->l4_rxhash = 1;
-               }
+       if (keys.ports) {
+               if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
+                       swap(keys.port16[0], keys.port16[1]);
+               skb->l4_rxhash = 1;
        }
 
        /* get a consistent hash (same value on both flow directions) */
-       if (addr2 < addr1)
-               swap(addr1, addr2);
+       if ((__force u32)keys.dst < (__force u32)keys.src)
+               swap(keys.dst, keys.src);
 
-       hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+       hash = jhash_3words((__force u32)keys.dst,
+                           (__force u32)keys.src,
+                           (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;
 
-done:
        skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
@@ -2718,6 +2652,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2997,12 +2933,11 @@ int netif_rx(struct sk_buff *skb)
        if (netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
@@ -3017,14 +2952,13 @@ int netif_rx(struct sk_buff *skb)
 
                rcu_read_unlock();
                preempt_enable();
-       }
-#else
+       } else
+#endif
        {
                unsigned int qtail;
                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
                put_cpu();
        }
-#endif
        return ret;
 }
 EXPORT_SYMBOL(netif_rx);
@@ -3230,8 +3164,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        int ret = NET_RX_DROP;
        __be16 type;
 
-       if (!netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
        trace_netif_receive_skb(skb);
 
@@ -3362,14 +3295,13 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu, ret;
 
@@ -3380,16 +3312,12 @@ int netif_receive_skb(struct sk_buff *skb)
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
-               } else {
-                       rcu_read_unlock();
-                       ret = __netif_receive_skb(skb);
+                       return ret;
                }
-
-               return ret;
+               rcu_read_unlock();
        }
-#else
-       return __netif_receive_skb(skb);
 #endif
+       return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -4282,6 +4210,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                            sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                    const struct seq_operations *ops)
+{
+       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dev_seq_open,
@@ -4532,7 +4466,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        uid_t uid;
        gid_t gid;
 
@@ -4589,7 +4523,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
  */
 int dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int err;
 
        err = __dev_set_promiscuity(dev, inc);
@@ -4616,7 +4550,7 @@ EXPORT_SYMBOL(dev_set_promiscuity);
 
 int dev_set_allmulti(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
 
        ASSERT_RTNL();
 
@@ -4719,7 +4653,7 @@ EXPORT_SYMBOL(dev_get_flags);
 
 int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int ret;
 
        ASSERT_RTNL();
@@ -4802,10 +4736,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
  *     Change settings on device based state flags. The flags are
  *     in the userspace exported format.
  */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int ret, changes;
-       int old_flags = dev->flags;
+       int ret;
+       unsigned int changes, old_flags = dev->flags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
@@ -5362,7 +5296,8 @@ static void rollback_registered(struct net_device *dev)
        list_del(&single);
 }
 
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
@@ -5371,12 +5306,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
 
-       if ((features & NETIF_F_NO_CSUM) &&
-           (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               netdev_warn(dev, "mixed no checksumming and other settings.\n");
-               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-       }
-
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
@@ -5424,7 +5353,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
 
 int __netdev_update_features(struct net_device *dev)
 {
-       u32 features;
+       netdev_features_t features;
        int err = 0;
 
        ASSERT_RTNL();
@@ -5440,16 +5369,16 @@ int __netdev_update_features(struct net_device *dev)
        if (dev->features == features)
                return 0;
 
-       netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
-               dev->features, features);
+       netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+               &dev->features, &features);
 
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
 
        if (unlikely(err < 0)) {
                netdev_err(dev,
-                       "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
-                       err, features, dev->features);
+                       "set_features() failed (%d); wanted %pNF, left %pNF\n",
+                       err, &features, &dev->features);
                return -1;
        }
 
@@ -5548,6 +5477,9 @@ static void netdev_init_one_queue(struct net_device *dev,
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
+#ifdef CONFIG_BQL
+       dql_init(&queue->dql, HZ);
+#endif
 }
 
 static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -5633,11 +5565,12 @@ int register_netdevice(struct net_device *dev)
        dev->wanted_features = dev->features & dev->hw_features;
 
        /* Turn on no cache copy if HW is doing checksum */
-       dev->hw_features |= NETIF_F_NOCACHE_COPY;
-       if ((dev->features & NETIF_F_ALL_CSUM) &&
-           !(dev->features & NETIF_F_NO_CSUM)) {
-               dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-               dev->features |= NETIF_F_NOCACHE_COPY;
+       if (!(dev->flags & IFF_LOOPBACK)) {
+               dev->hw_features |= NETIF_F_NOCACHE_COPY;
+               if (dev->features & NETIF_F_ALL_CSUM) {
+                       dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+                       dev->features |= NETIF_F_NOCACHE_COPY;
+               }
        }
 
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6373,7 +6306,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *     @one to the master device with current feature set @all.  Will not
  *     enable anything that is off in @mask. Returns the new feature set.
  */
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+       netdev_features_t one, netdev_features_t mask)
 {
        if (mask & NETIF_F_GEN_CSUM)
                mask |= NETIF_F_ALL_CSUM;
@@ -6382,10 +6316,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
        all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-       /* If device needs checksumming, downgrade to it. */
-       if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
-               all &= ~NETIF_F_NO_CSUM;
-
        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_GEN_CSUM)
                all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
index 277faef9148d0053bc6987ae67e73571d49a4d99..febba516db6274c83d56577e7dc4d7fd61a15b72 100644 (file)
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &dev_mc_seq_ops,
-                           sizeof(struct seq_net_private));
+       return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {
index d5e2c4c0910763b180fcb73d9722a770c7f2b863..43d94cedbf7c075e5c38dd124d9e91aec8143739 100644 (file)
@@ -366,7 +366,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                dev_hold(dst->dev);
                dev_put(dev);
                rcu_read_lock();
-               neigh = dst_get_neighbour(dst);
+               neigh = dst_get_neighbour_noref(dst);
                if (neigh && neigh->dev == dev) {
                        neigh->dev = dst->dev;
                        dev_hold(dst->dev);
index f444817071245006200ba882a2f085ee43ae3773..921aa2b4b4158ab1aefab67c474a58065d43c77f 100644 (file)
@@ -36,235 +36,44 @@ u32 ethtool_op_get_link(struct net_device *dev)
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_tx_csum(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
-
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_IP_CSUM;
-       else
-               dev->features &= ~NETIF_F_IP_CSUM;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_HW_CSUM;
-       else
-               dev->features &= ~NETIF_F_HW_CSUM;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       else
-               dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-
-u32 ethtool_op_get_sg(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_SG) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_sg);
-
-int ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_SG;
-       else
-               dev->features &= ~NETIF_F_SG;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_sg);
-
-u32 ethtool_op_get_tso(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_TSO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tso);
-
-int ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_TSO;
-       else
-               dev->features &= ~NETIF_F_TSO;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tso);
-
-u32 ethtool_op_get_ufo(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_UFO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-
-int ethtool_op_set_ufo(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_UFO;
-       else
-               dev->features &= ~NETIF_F_UFO;
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-
-/* the following list of flags are the same as their associated
- * NETIF_F_xxx values in include/linux/netdevice.h
- */
-static const u32 flags_dup_features =
-       (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
-        ETH_FLAG_RXHASH);
-
-u32 ethtool_op_get_flags(struct net_device *dev)
-{
-       /* in the future, this function will probably contain additional
-        * handling for flags which are not so easily handled
-        * by a simple masking operation
-        */
-
-       return dev->features & flags_dup_features;
-}
-EXPORT_SYMBOL(ethtool_op_get_flags);
-
-/* Check if device can enable (or disable) particular feature coded in "data"
- * argument. Flags "supported" describe features that can be toggled by device.
- * If feature can not be toggled, it state (enabled or disabled) must match
- * hardcoded device features state, otherwise flags are marked as invalid.
- */
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
-{
-       u32 features = dev->features & flags_dup_features;
-       /* "data" can contain only flags_dup_features bits,
-        * see __ethtool_set_flags */
-
-       return (features & ~supported) != (data & ~supported);
-}
-EXPORT_SYMBOL(ethtool_invalid_flags);
-
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
-       if (ethtool_invalid_flags(dev, data, supported))
-               return -EINVAL;
-
-       dev->features = ((dev->features & ~flags_dup_features) |
-                        (data & flags_dup_features));
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_flags);
-
 /* Handlers for each ethtool command */
 
-#define ETHTOOL_DEV_FEATURE_WORDS      1
-
-static void ethtool_get_features_compat(struct net_device *dev,
-       struct ethtool_get_features_block *features)
-{
-       if (!dev->ethtool_ops)
-               return;
-
-       /* getting RX checksum */
-       if (dev->ethtool_ops->get_rx_csum)
-               if (dev->ethtool_ops->get_rx_csum(dev))
-                       features[0].active |= NETIF_F_RXCSUM;
-
-       /* mark legacy-changeable features */
-       if (dev->ethtool_ops->set_sg)
-               features[0].available |= NETIF_F_SG;
-       if (dev->ethtool_ops->set_tx_csum)
-               features[0].available |= NETIF_F_ALL_CSUM;
-       if (dev->ethtool_ops->set_tso)
-               features[0].available |= NETIF_F_ALL_TSO;
-       if (dev->ethtool_ops->set_rx_csum)
-               features[0].available |= NETIF_F_RXCSUM;
-       if (dev->ethtool_ops->set_flags)
-               features[0].available |= flags_dup_features;
-}
-
-static int ethtool_set_feature_compat(struct net_device *dev,
-       int (*legacy_set)(struct net_device *, u32),
-       struct ethtool_set_features_block *features, u32 mask)
-{
-       u32 do_set;
-
-       if (!legacy_set)
-               return 0;
-
-       if (!(features[0].valid & mask))
-               return 0;
-
-       features[0].valid &= ~mask;
-
-       do_set = !!(features[0].requested & mask);
-
-       if (legacy_set(dev, do_set) < 0)
-               netdev_info(dev,
-                       "Legacy feature change (%s) failed for 0x%08x\n",
-                       do_set ? "set" : "clear", mask);
-
-       return 1;
-}
-
-static int ethtool_set_flags_compat(struct net_device *dev,
-       int (*legacy_set)(struct net_device *, u32),
-       struct ethtool_set_features_block *features, u32 mask)
-{
-       u32 value;
-
-       if (!legacy_set)
-               return 0;
-
-       if (!(features[0].valid & mask))
-               return 0;
-
-       value = dev->features & ~features[0].valid;
-       value |= features[0].requested;
-
-       features[0].valid &= ~mask;
-
-       if (legacy_set(dev, value & mask) < 0)
-               netdev_info(dev, "Legacy flags change failed\n");
-
-       return 1;
-}
-
-static int ethtool_set_features_compat(struct net_device *dev,
-       struct ethtool_set_features_block *features)
-{
-       int compat;
-
-       if (!dev->ethtool_ops)
-               return 0;
-
-       compat  = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
-               features, NETIF_F_SG);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
-               features, NETIF_F_ALL_CSUM);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
-               features, NETIF_F_ALL_TSO);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
-               features, NETIF_F_RXCSUM);
-       compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
-               features, flags_dup_features);
-
-       return compat;
-}
+#define ETHTOOL_DEV_FEATURE_WORDS      ((NETDEV_FEATURE_COUNT + 31) / 32)
+
+static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
+       [NETIF_F_SG_BIT] =               "tx-scatter-gather",
+       [NETIF_F_IP_CSUM_BIT] =          "tx-checksum-ipv4",
+       [NETIF_F_HW_CSUM_BIT] =          "tx-checksum-ip-generic",
+       [NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
+       [NETIF_F_HIGHDMA_BIT] =          "highdma",
+       [NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
+       [NETIF_F_HW_VLAN_TX_BIT] =       "tx-vlan-hw-insert",
+
+       [NETIF_F_HW_VLAN_RX_BIT] =       "rx-vlan-hw-parse",
+       [NETIF_F_HW_VLAN_FILTER_BIT] =   "rx-vlan-filter",
+       [NETIF_F_VLAN_CHALLENGED_BIT] =  "vlan-challenged",
+       [NETIF_F_GSO_BIT] =              "tx-generic-segmentation",
+       [NETIF_F_LLTX_BIT] =             "tx-lockless",
+       [NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
+       [NETIF_F_GRO_BIT] =              "rx-gro",
+       [NETIF_F_LRO_BIT] =              "rx-lro",
+
+       [NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
+       [NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
+       [NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
+       [NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+       [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
+       [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
+
+       [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
+       [NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
+       [NETIF_F_FCOE_MTU_BIT] =         "fcoe-mtu",
+       [NETIF_F_NTUPLE_BIT] =           "rx-ntuple-filter",
+       [NETIF_F_RXHASH_BIT] =           "rx-hashing",
+       [NETIF_F_RXCSUM_BIT] =           "rx-checksum",
+       [NETIF_F_NOCACHE_COPY_BIT] =     "tx-nocache-copy",
+       [NETIF_F_LOOPBACK_BIT] =         "loopback",
+};
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
 {
@@ -272,18 +81,21 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
                .cmd = ETHTOOL_GFEATURES,
                .size = ETHTOOL_DEV_FEATURE_WORDS,
        };
-       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
-               {
-                       .available = dev->hw_features,
-                       .requested = dev->wanted_features,
-                       .active = dev->features,
-                       .never_changed = NETIF_F_NEVER_CHANGE,
-               },
-       };
+       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
        u32 __user *sizeaddr;
        u32 copy_size;
+       int i;
 
-       ethtool_get_features_compat(dev, features);
+       /* in case feature bits run out again */
+       BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
+
+       for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+               features[i].available = (u32)(dev->hw_features >> (32 * i));
+               features[i].requested = (u32)(dev->wanted_features >> (32 * i));
+               features[i].active = (u32)(dev->features >> (32 * i));
+               features[i].never_changed =
+                       (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
+       }
 
        sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
        if (get_user(copy_size, sizeaddr))
@@ -305,7 +117,8 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_sfeatures cmd;
        struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
-       int ret = 0;
+       netdev_features_t wanted = 0, valid = 0;
+       int i, ret = 0;
 
        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;
@@ -317,65 +130,29 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
        if (copy_from_user(features, useraddr, sizeof(features)))
                return -EFAULT;
 
-       if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
-               return -EINVAL;
+       for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+               valid |= (netdev_features_t)features[i].valid << (32 * i);
+               wanted |= (netdev_features_t)features[i].requested << (32 * i);
+       }
 
-       if (ethtool_set_features_compat(dev, features))
-               ret |= ETHTOOL_F_COMPAT;
+       if (valid & ~NETIF_F_ETHTOOL_BITS)
+               return -EINVAL;
 
-       if (features[0].valid & ~dev->hw_features) {
-               features[0].valid &= dev->hw_features;
+       if (valid & ~dev->hw_features) {
+               valid &= dev->hw_features;
                ret |= ETHTOOL_F_UNSUPPORTED;
        }
 
-       dev->wanted_features &= ~features[0].valid;
-       dev->wanted_features |= features[0].valid & features[0].requested;
+       dev->wanted_features &= ~valid;
+       dev->wanted_features |= wanted & valid;
        __netdev_update_features(dev);
 
-       if ((dev->wanted_features ^ dev->features) & features[0].valid)
+       if ((dev->wanted_features ^ dev->features) & valid)
                ret |= ETHTOOL_F_WISH;
 
        return ret;
 }
 
-static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
-       /* NETIF_F_SG */              "tx-scatter-gather",
-       /* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
-       /* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
-       /* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
-       /* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
-       /* NETIF_F_HIGHDMA */         "highdma",
-       /* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
-       /* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
-
-       /* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
-       /* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
-       /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
-       /* NETIF_F_GSO */             "tx-generic-segmentation",
-       /* NETIF_F_LLTX */            "tx-lockless",
-       /* NETIF_F_NETNS_LOCAL */     "netns-local",
-       /* NETIF_F_GRO */             "rx-gro",
-       /* NETIF_F_LRO */             "rx-lro",
-
-       /* NETIF_F_TSO */             "tx-tcp-segmentation",
-       /* NETIF_F_UFO */             "tx-udp-fragmentation",
-       /* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
-       /* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
-       /* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
-       /* NETIF_F_FSO */             "tx-fcoe-segmentation",
-       "",
-       "",
-
-       /* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
-       /* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
-       /* NETIF_F_FCOE_MTU */        "fcoe-mtu",
-       /* NETIF_F_NTUPLE */          "rx-ntuple-filter",
-       /* NETIF_F_RXHASH */          "rx-hashing",
-       /* NETIF_F_RXCSUM */          "rx-checksum",
-       /* NETIF_F_NOCACHE_COPY */    "tx-nocache-copy",
-       /* NETIF_F_LOOPBACK */        "loopback",
-};
-
 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
 {
        const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -402,7 +179,7 @@ static void __ethtool_get_strings(struct net_device *dev,
                ops->get_strings(dev, stringset, data);
 }
 
-static u32 ethtool_get_feature_mask(u32 eth_cmd)
+static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
 {
        /* feature masks of legacy discrete ethtool ops */
 
@@ -433,136 +210,82 @@ static u32 ethtool_get_feature_mask(u32 eth_cmd)
        }
 }
 
-static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
-{
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops)
-               return NULL;
-
-       switch (ethcmd) {
-       case ETHTOOL_GTXCSUM:
-               return ops->get_tx_csum;
-       case ETHTOOL_GRXCSUM:
-               return ops->get_rx_csum;
-       case ETHTOOL_SSG:
-               return ops->get_sg;
-       case ETHTOOL_STSO:
-               return ops->get_tso;
-       case ETHTOOL_SUFO:
-               return ops->get_ufo;
-       default:
-               return NULL;
-       }
-}
-
-static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
-{
-       return !!(dev->features & NETIF_F_ALL_CSUM);
-}
-
 static int ethtool_get_one_feature(struct net_device *dev,
        char __user *useraddr, u32 ethcmd)
 {
-       u32 mask = ethtool_get_feature_mask(ethcmd);
+       netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
        struct ethtool_value edata = {
                .cmd = ethcmd,
                .data = !!(dev->features & mask),
        };
 
-       /* compatibility with discrete get_ ops */
-       if (!(dev->hw_features & mask)) {
-               u32 (*actor)(struct net_device *);
-
-               actor = __ethtool_get_one_feature_actor(dev, ethcmd);
-
-               /* bug compatibility with old get_rx_csum */
-               if (ethcmd == ETHTOOL_GRXCSUM && !actor)
-                       actor = __ethtool_get_rx_csum_oldbug;
-
-               if (actor)
-                       edata.data = actor(dev);
-       }
-
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
                return -EFAULT;
        return 0;
 }
 
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_sg(struct net_device *dev, u32 data);
-static int __ethtool_set_tso(struct net_device *dev, u32 data);
-static int __ethtool_set_ufo(struct net_device *dev, u32 data);
-
 static int ethtool_set_one_feature(struct net_device *dev,
        void __user *useraddr, u32 ethcmd)
 {
        struct ethtool_value edata;
-       u32 mask;
+       netdev_features_t mask;
 
        if (copy_from_user(&edata, useraddr, sizeof(edata)))
                return -EFAULT;
 
        mask = ethtool_get_feature_mask(ethcmd);
        mask &= dev->hw_features;
-       if (mask) {
-               if (edata.data)
-                       dev->wanted_features |= mask;
-               else
-                       dev->wanted_features &= ~mask;
+       if (!mask)
+               return -EOPNOTSUPP;
 
-               __netdev_update_features(dev);
-               return 0;
-       }
+       if (edata.data)
+               dev->wanted_features |= mask;
+       else
+               dev->wanted_features &= ~mask;
 
-       /* Driver is not converted to ndo_fix_features or does not
-        * support changing this offload. In the latter case it won't
-        * have corresponding ethtool_ops field set.
-        *
-        * Following part is to be removed after all drivers advertise
-        * their changeable features in netdev->hw_features and stop
-        * using discrete offload setting ops.
-        */
+       __netdev_update_features(dev);
 
-       switch (ethcmd) {
-       case ETHTOOL_STXCSUM:
-               return __ethtool_set_tx_csum(dev, edata.data);
-       case ETHTOOL_SRXCSUM:
-               return __ethtool_set_rx_csum(dev, edata.data);
-       case ETHTOOL_SSG:
-               return __ethtool_set_sg(dev, edata.data);
-       case ETHTOOL_STSO:
-               return __ethtool_set_tso(dev, edata.data);
-       case ETHTOOL_SUFO:
-               return __ethtool_set_ufo(dev, edata.data);
-       default:
-               return -EOPNOTSUPP;
-       }
+       return 0;
+}
+
+#define ETH_ALL_FLAGS    (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
+                         ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
+                         NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
+
+static u32 __ethtool_get_flags(struct net_device *dev)
+{
+       u32 flags = 0;
+
+       if (dev->features & NETIF_F_LRO)        flags |= ETH_FLAG_LRO;
+       if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN;
+       if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN;
+       if (dev->features & NETIF_F_NTUPLE)     flags |= ETH_FLAG_NTUPLE;
+       if (dev->features & NETIF_F_RXHASH)     flags |= ETH_FLAG_RXHASH;
+
+       return flags;
 }
 
-int __ethtool_set_flags(struct net_device *dev, u32 data)
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
 {
-       u32 changed;
+       netdev_features_t features = 0, changed;
 
-       if (data & ~flags_dup_features)
+       if (data & ~ETH_ALL_FLAGS)
                return -EINVAL;
 
-       /* legacy set_flags() op */
-       if (dev->ethtool_ops->set_flags) {
-               if (unlikely(dev->hw_features & flags_dup_features))
-                       netdev_warn(dev,
-                               "driver BUG: mixed hw_features and set_flags()\n");
-               return dev->ethtool_ops->set_flags(dev, data);
-       }
+       if (data & ETH_FLAG_LRO)        features |= NETIF_F_LRO;
+       if (data & ETH_FLAG_RXVLAN)     features |= NETIF_F_HW_VLAN_RX;
+       if (data & ETH_FLAG_TXVLAN)     features |= NETIF_F_HW_VLAN_TX;
+       if (data & ETH_FLAG_NTUPLE)     features |= NETIF_F_NTUPLE;
+       if (data & ETH_FLAG_RXHASH)     features |= NETIF_F_RXHASH;
 
        /* allow changing only bits set in hw_features */
-       changed = (data ^ dev->features) & flags_dup_features;
+       changed = (features ^ dev->features) & ETH_ALL_FEATURES;
        if (changed & ~dev->hw_features)
                return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
 
        dev->wanted_features =
-               (dev->wanted_features & ~changed) | (data & dev->hw_features);
+               (dev->wanted_features & ~changed) | (features & changed);
 
        __netdev_update_features(dev);
 
@@ -716,6 +439,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
 {
        struct ethtool_rxnfc info;
        size_t info_size = sizeof(info);
+       int rc;
 
        if (!dev->ethtool_ops->set_rxnfc)
                return -EOPNOTSUPP;
@@ -731,7 +455,15 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
        if (copy_from_user(&info, useraddr, info_size))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_rxnfc(dev, &info);
+       rc = dev->ethtool_ops->set_rxnfc(dev, &info);
+       if (rc)
+               return rc;
+
+       if (cmd == ETHTOOL_SRXCLSRLINS &&
+           copy_to_user(useraddr, &info, info_size))
+               return -EFAULT;
+
+       return 0;
 }
 
 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
@@ -792,34 +524,44 @@ err_out:
 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
                                                     void __user *useraddr)
 {
-       struct ethtool_rxfh_indir *indir;
-       u32 table_size;
-       size_t full_size;
+       u32 user_size, dev_size;
+       u32 *indir;
        int ret;
 
-       if (!dev->ethtool_ops->get_rxfh_indir)
+       if (!dev->ethtool_ops->get_rxfh_indir_size ||
+           !dev->ethtool_ops->get_rxfh_indir)
+               return -EOPNOTSUPP;
+       dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+       if (dev_size == 0)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&table_size,
+       if (copy_from_user(&user_size,
                           useraddr + offsetof(struct ethtool_rxfh_indir, size),
-                          sizeof(table_size)))
+                          sizeof(user_size)))
                return -EFAULT;
 
-       if (table_size >
-           (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-               return -ENOMEM;
-       full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kzalloc(full_size, GFP_USER);
+       if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
+                        &dev_size, sizeof(dev_size)))
+               return -EFAULT;
+
+       /* If the user buffer size is 0, this is just a query for the
+        * device table size.  Otherwise, if it's smaller than the
+        * device table size it's an error.
+        */
+       if (user_size < dev_size)
+               return user_size == 0 ? 0 : -EINVAL;
+
+       indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
        if (!indir)
                return -ENOMEM;
 
-       indir->cmd = ETHTOOL_GRXFHINDIR;
-       indir->size = table_size;
        ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
        if (ret)
                goto out;
 
-       if (copy_to_user(useraddr, indir, full_size))
+       if (copy_to_user(useraddr +
+                        offsetof(struct ethtool_rxfh_indir, ring_index[0]),
+                        indir, dev_size * sizeof(indir[0])))
                ret = -EFAULT;
 
 out:
@@ -830,30 +572,56 @@ out:
 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
                                                     void __user *useraddr)
 {
-       struct ethtool_rxfh_indir *indir;
-       u32 table_size;
-       size_t full_size;
+       struct ethtool_rxnfc rx_rings;
+       u32 user_size, dev_size, i;
+       u32 *indir;
        int ret;
 
-       if (!dev->ethtool_ops->set_rxfh_indir)
+       if (!dev->ethtool_ops->get_rxfh_indir_size ||
+           !dev->ethtool_ops->set_rxfh_indir ||
+           !dev->ethtool_ops->get_rxnfc)
+               return -EOPNOTSUPP;
+       dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+       if (dev_size == 0)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&table_size,
+       if (copy_from_user(&user_size,
                           useraddr + offsetof(struct ethtool_rxfh_indir, size),
-                          sizeof(table_size)))
+                          sizeof(user_size)))
                return -EFAULT;
 
-       if (table_size >
-           (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-               return -ENOMEM;
-       full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kmalloc(full_size, GFP_USER);
+       if (user_size != 0 && user_size != dev_size)
+               return -EINVAL;
+
+       indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
        if (!indir)
                return -ENOMEM;
 
-       if (copy_from_user(indir, useraddr, full_size)) {
-               ret = -EFAULT;
+       rx_rings.cmd = ETHTOOL_GRXRINGS;
+       ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL);
+       if (ret)
                goto out;
+
+       if (user_size == 0) {
+               for (i = 0; i < dev_size; i++)
+                       indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+       } else {
+               if (copy_from_user(indir,
+                                 useraddr +
+                                 offsetof(struct ethtool_rxfh_indir,
+                                          ring_index[0]),
+                                 dev_size * sizeof(indir[0]))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
+               /* Validate ring indices */
+               for (i = 0; i < dev_size; i++) {
+                       if (indir[i] >= rx_rings.data) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+               }
        }
 
        ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
@@ -863,58 +631,6 @@ out:
        return ret;
 }
 
-/*
- * ethtool does not (or did not) set masks for flow parameters that are
- * not specified, so if both value and mask are 0 then this must be
- * treated as equivalent to a mask with all bits set.  Implement that
- * here rather than in drivers.
- */
-static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
-{
-       struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
-       struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
-
-       if (fs->flow_type != TCP_V4_FLOW &&
-           fs->flow_type != UDP_V4_FLOW &&
-           fs->flow_type != SCTP_V4_FLOW)
-               return;
-
-       if (!(entry->ip4src | mask->ip4src))
-               mask->ip4src = htonl(0xffffffff);
-       if (!(entry->ip4dst | mask->ip4dst))
-               mask->ip4dst = htonl(0xffffffff);
-       if (!(entry->psrc | mask->psrc))
-               mask->psrc = htons(0xffff);
-       if (!(entry->pdst | mask->pdst))
-               mask->pdst = htons(0xffff);
-       if (!(entry->tos | mask->tos))
-               mask->tos = 0xff;
-       if (!(fs->vlan_tag | fs->vlan_tag_mask))
-               fs->vlan_tag_mask = 0xffff;
-       if (!(fs->data | fs->data_mask))
-               fs->data_mask = 0xffffffffffffffffULL;
-}
-
-static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
-                                                   void __user *useraddr)
-{
-       struct ethtool_rx_ntuple cmd;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops->set_rx_ntuple)
-               return -EOPNOTSUPP;
-
-       if (!(dev->features & NETIF_F_NTUPLE))
-               return -EINVAL;
-
-       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
-               return -EFAULT;
-
-       rx_ntuple_fix_masks(&cmd.fs);
-
-       return ops->set_rx_ntuple(dev, &cmd);
-}
-
 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_regs regs;
@@ -1231,81 +947,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
        return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
 }
 
-static int __ethtool_set_sg(struct net_device *dev, u32 data)
-{
-       int err;
-
-       if (!dev->ethtool_ops->set_sg)
-               return -EOPNOTSUPP;
-
-       if (data && !(dev->features & NETIF_F_ALL_CSUM))
-               return -EINVAL;
-
-       if (!data && dev->ethtool_ops->set_tso) {
-               err = dev->ethtool_ops->set_tso(dev, 0);
-               if (err)
-                       return err;
-       }
-
-       if (!data && dev->ethtool_ops->set_ufo) {
-               err = dev->ethtool_ops->set_ufo(dev, 0);
-               if (err)
-                       return err;
-       }
-       return dev->ethtool_ops->set_sg(dev, data);
-}
-
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
-{
-       int err;
-
-       if (!dev->ethtool_ops->set_tx_csum)
-               return -EOPNOTSUPP;
-
-       if (!data && dev->ethtool_ops->set_sg) {
-               err = __ethtool_set_sg(dev, 0);
-               if (err)
-                       return err;
-       }
-
-       return dev->ethtool_ops->set_tx_csum(dev, data);
-}
-
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_rx_csum)
-               return -EOPNOTSUPP;
-
-       if (!data)
-               dev->features &= ~NETIF_F_GRO;
-
-       return dev->ethtool_ops->set_rx_csum(dev, data);
-}
-
-static int __ethtool_set_tso(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_tso)
-               return -EOPNOTSUPP;
-
-       if (data && !(dev->features & NETIF_F_SG))
-               return -EINVAL;
-
-       return dev->ethtool_ops->set_tso(dev, data);
-}
-
-static int __ethtool_set_ufo(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_ufo)
-               return -EOPNOTSUPP;
-       if (data && !(dev->features & NETIF_F_SG))
-               return -EINVAL;
-       if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
-               (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
-                       == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
-               return -EINVAL;
-       return dev->ethtool_ops->set_ufo(dev, data);
-}
-
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_test test;
@@ -1771,9 +1412,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                break;
        case ETHTOOL_GFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_flags ?
-                                       dev->ethtool_ops->get_flags :
-                                       ethtool_op_get_flags));
+                                       __ethtool_get_flags);
                break;
        case ETHTOOL_SFLAGS:
                rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
@@ -1804,9 +1443,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_RESET:
                rc = ethtool_reset(dev, useraddr);
                break;
-       case ETHTOOL_SRXNTUPLE:
-               rc = ethtool_set_rx_ntuple(dev, useraddr);
-               break;
        case ETHTOOL_GSSET_INFO:
                rc = ethtool_get_sset_info(dev, useraddr);
                break;
index 8ae42de9c79e78379691f20207987c5b2ea338b6..e318c7e98042ffcfd1fc43a1d04b5cf816d651a8 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
new file mode 100644 (file)
index 0000000..0985b9b
--- /dev/null
@@ -0,0 +1,143 @@
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+/* copy saddr & daddr, possibly using 64bit load/store
+ * Equivalent to :     flow->src = iph->saddr;
+ *                     flow->dst = iph->daddr;
+ */
+static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+{
+       BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
+                    offsetof(typeof(*flow), src) + sizeof(flow->src));
+       memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+}
+
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+       int poff, nhoff = skb_network_offset(skb);
+       u8 ip_proto;
+       __be16 proto = skb->protocol;
+
+       memset(flow, 0, sizeof(*flow));
+
+again:
+       switch (proto) {
+       case __constant_htons(ETH_P_IP): {
+               const struct iphdr *iph;
+               struct iphdr _iph;
+ip:
+               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+               if (!iph)
+                       return false;
+
+               if (ip_is_fragment(iph))
+                       ip_proto = 0;
+               else
+                       ip_proto = iph->protocol;
+               iph_to_flow_copy_addrs(flow, iph);
+               nhoff += iph->ihl * 4;
+               break;
+       }
+       case __constant_htons(ETH_P_IPV6): {
+               const struct ipv6hdr *iph;
+               struct ipv6hdr _iph;
+ipv6:
+               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+               if (!iph)
+                       return false;
+
+               ip_proto = iph->nexthdr;
+               flow->src = iph->saddr.s6_addr32[3];
+               flow->dst = iph->daddr.s6_addr32[3];
+               nhoff += sizeof(struct ipv6hdr);
+               break;
+       }
+       case __constant_htons(ETH_P_8021Q): {
+               const struct vlan_hdr *vlan;
+               struct vlan_hdr _vlan;
+
+               vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+               if (!vlan)
+                       return false;
+
+               proto = vlan->h_vlan_encapsulated_proto;
+               nhoff += sizeof(*vlan);
+               goto again;
+       }
+       case __constant_htons(ETH_P_PPP_SES): {
+               struct {
+                       struct pppoe_hdr hdr;
+                       __be16 proto;
+               } *hdr, _hdr;
+               hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+               if (!hdr)
+                       return false;
+               proto = hdr->proto;
+               nhoff += PPPOE_SES_HLEN;
+               switch (proto) {
+               case __constant_htons(PPP_IP):
+                       goto ip;
+               case __constant_htons(PPP_IPV6):
+                       goto ipv6;
+               default:
+                       return false;
+               }
+       }
+       default:
+               return false;
+       }
+
+       switch (ip_proto) {
+       case IPPROTO_GRE: {
+               struct gre_hdr {
+                       __be16 flags;
+                       __be16 proto;
+               } *hdr, _hdr;
+
+               hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+               if (!hdr)
+                       return false;
+               /*
+                * Only look inside GRE if version zero and no
+                * routing
+                */
+               if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
+                       proto = hdr->proto;
+                       nhoff += 4;
+                       if (hdr->flags & GRE_CSUM)
+                               nhoff += 4;
+                       if (hdr->flags & GRE_KEY)
+                               nhoff += 4;
+                       if (hdr->flags & GRE_SEQ)
+                               nhoff += 4;
+                       goto again;
+               }
+               break;
+       }
+       case IPPROTO_IPIP:
+               goto again;
+       default:
+               break;
+       }
+
+       flow->ip_proto = ip_proto;
+       poff = proto_ports_offset(ip_proto);
+       if (poff >= 0) {
+               __be32 *ports, _ports;
+
+               nhoff += poff;
+               ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+               if (ports)
+                       flow->ports = *ports;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(skb_flow_dissect);
index 039d51e6c284e7ab655319d399b9d40060357dcf..e287346e09343f1f315866fc7e77fb3770d927b0 100644 (file)
@@ -238,6 +238,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
+                               n->arp_queue_len_bytes = 0;
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
@@ -272,7 +273,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 }
 EXPORT_SYMBOL(neigh_ifdown);
 
-static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 {
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
@@ -287,7 +288,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
                        goto out_entries;
        }
 
-       n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+       if (tbl->entry_size)
+               n = kzalloc(tbl->entry_size, GFP_ATOMIC);
+       else {
+               int sz = sizeof(*n) + tbl->key_len;
+
+               sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
+               sz += dev->neigh_priv_len;
+               n = kzalloc(sz, GFP_ATOMIC);
+       }
        if (!n)
                goto out_entries;
 
@@ -313,11 +322,18 @@ out_entries:
        goto out;
 }
 
+static void neigh_get_hash_rnd(u32 *x)
+{
+       get_random_bytes(x, sizeof(*x));
+       *x |= 1;
+}
+
 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 {
        size_t size = (1 << shift) * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
        struct neighbour __rcu **buckets;
+       int i;
 
        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
@@ -334,8 +350,8 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
        }
        ret->hash_buckets = buckets;
        ret->hash_shift = shift;
-       get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
-       ret->hash_rnd |= 1;
+       for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
+               neigh_get_hash_rnd(&ret->hash_rnd[i]);
        return ret;
 }
 
@@ -462,7 +478,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
-       struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+       struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
        struct neigh_hash_table *nht;
 
        if (!n) {
@@ -480,6 +496,14 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                goto out_neigh_release;
        }
 
+       if (dev->netdev_ops->ndo_neigh_construct) {
+               error = dev->netdev_ops->ndo_neigh_construct(n);
+               if (error < 0) {
+                       rc = ERR_PTR(error);
+                       goto out_neigh_release;
+               }
+       }
+
        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
@@ -677,18 +701,14 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
                neigh_parms_destroy(parms);
 }
 
-static void neigh_destroy_rcu(struct rcu_head *head)
-{
-       struct neighbour *neigh = container_of(head, struct neighbour, rcu);
-
-       kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
-}
 /*
  *     neighbour must already be out of the table;
  *
  */
 void neigh_destroy(struct neighbour *neigh)
 {
+       struct net_device *dev = neigh->dev;
+
        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
        if (!neigh->dead) {
@@ -702,14 +722,18 @@ void neigh_destroy(struct neighbour *neigh)
                printk(KERN_WARNING "Impossible event.\n");
 
        skb_queue_purge(&neigh->arp_queue);
+       neigh->arp_queue_len_bytes = 0;
 
-       dev_put(neigh->dev);
+       if (dev->netdev_ops->ndo_neigh_destroy)
+               dev->netdev_ops->ndo_neigh_destroy(neigh);
+
+       dev_put(dev);
        neigh_parms_put(neigh->parms);
 
        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
        atomic_dec(&neigh->tbl->entries);
-       call_rcu(&neigh->rcu, neigh_destroy_rcu);
+       kfree_rcu(neigh, rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
 
@@ -842,6 +866,7 @@ static void neigh_invalidate(struct neighbour *neigh)
                write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
+       neigh->arp_queue_len_bytes = 0;
 }
 
 static void neigh_probe(struct neighbour *neigh)
@@ -980,15 +1005,20 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 
        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
-                       if (skb_queue_len(&neigh->arp_queue) >=
-                           neigh->parms->queue_len) {
+                       while (neigh->arp_queue_len_bytes + skb->truesize >
+                              neigh->parms->queue_len_bytes) {
                                struct sk_buff *buff;
+
                                buff = __skb_dequeue(&neigh->arp_queue);
+                               if (!buff)
+                                       break;
+                               neigh->arp_queue_len_bytes -= buff->truesize;
                                kfree_skb(buff);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
+                       neigh->arp_queue_len_bytes += skb->truesize;
                }
                rc = 1;
        }
@@ -1167,7 +1197,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
                        rcu_read_lock();
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
-                       if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+                       if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
                                n1 = n2;
                        n1->output(n1, skb);
                        rcu_read_unlock();
@@ -1175,6 +1205,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
+               neigh->arp_queue_len_bytes = 0;
        }
 out:
        if (update_isrouter) {
@@ -1477,11 +1508,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(tbl->parms.base_reachable_time);
 
-       if (!tbl->kmem_cachep)
-               tbl->kmem_cachep =
-                       kmem_cache_create(tbl->id, tbl->entry_size, 0,
-                                         SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                         NULL);
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");
@@ -1566,9 +1592,6 @@ int neigh_table_clear(struct neigh_table *tbl)
        free_percpu(tbl->stats);
        tbl->stats = NULL;
 
-       kmem_cache_destroy(tbl->kmem_cachep);
-       tbl->kmem_cachep = NULL;
-
        return 0;
 }
 EXPORT_SYMBOL(neigh_table_clear);
@@ -1747,7 +1770,11 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
                NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
 
        NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-       NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+       NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
+       /* approximative value for deprecated QUEUE_LEN (in packets) */
+       NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
+                   DIV_ROUND_UP(parms->queue_len_bytes,
+                                SKB_TRUESIZE(ETH_FRAME_LEN)));
        NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
        NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
        NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
@@ -1808,7 +1835,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 
                rcu_read_lock_bh();
                nht = rcu_dereference_bh(tbl->nht);
-               ndc.ndtc_hash_rnd = nht->hash_rnd;
+               ndc.ndtc_hash_rnd = nht->hash_rnd[0];
                ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
                rcu_read_unlock_bh();
 
@@ -1974,7 +2001,11 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
                        switch (i) {
                        case NDTPA_QUEUE_LEN:
-                               p->queue_len = nla_get_u32(tbp[i]);
+                               p->queue_len_bytes = nla_get_u32(tbp[i]) *
+                                                    SKB_TRUESIZE(ETH_FRAME_LEN);
+                               break;
+                       case NDTPA_QUEUE_LENBYTES:
+                               p->queue_len_bytes = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_PROXY_QLEN:
                                p->proxy_qlen = nla_get_u32(tbp[i]);
@@ -2397,7 +2428,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct neigh_table *tbl = state->tbl;
 
-       pn = pn->next;
+       do {
+               pn = pn->next;
+       } while (pn && !net_eq(pneigh_net(pn), net));
+
        while (!pn) {
                if (++state->bucket > PNEIGH_HASHMASK)
                        break;
@@ -2635,117 +2669,158 @@ EXPORT_SYMBOL(neigh_app_ns);
 
 #ifdef CONFIG_SYSCTL
 
-#define NEIGH_VARS_MAX 19
+static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+                          size_t *lenp, loff_t *ppos)
+{
+       int size, ret;
+       ctl_table tmp = *ctl;
+
+       tmp.data = &size;
+       size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
+       ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+       if (write && !ret)
+               *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
+       return ret;
+}
+
+enum {
+       NEIGH_VAR_MCAST_PROBE,
+       NEIGH_VAR_UCAST_PROBE,
+       NEIGH_VAR_APP_PROBE,
+       NEIGH_VAR_RETRANS_TIME,
+       NEIGH_VAR_BASE_REACHABLE_TIME,
+       NEIGH_VAR_DELAY_PROBE_TIME,
+       NEIGH_VAR_GC_STALETIME,
+       NEIGH_VAR_QUEUE_LEN,
+       NEIGH_VAR_QUEUE_LEN_BYTES,
+       NEIGH_VAR_PROXY_QLEN,
+       NEIGH_VAR_ANYCAST_DELAY,
+       NEIGH_VAR_PROXY_DELAY,
+       NEIGH_VAR_LOCKTIME,
+       NEIGH_VAR_RETRANS_TIME_MS,
+       NEIGH_VAR_BASE_REACHABLE_TIME_MS,
+       NEIGH_VAR_GC_INTERVAL,
+       NEIGH_VAR_GC_THRESH1,
+       NEIGH_VAR_GC_THRESH2,
+       NEIGH_VAR_GC_THRESH3,
+       NEIGH_VAR_MAX
+};
 
 static struct neigh_sysctl_table {
        struct ctl_table_header *sysctl_header;
-       struct ctl_table neigh_vars[NEIGH_VARS_MAX];
+       struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
        char *dev_name;
 } neigh_sysctl_template __read_mostly = {
        .neigh_vars = {
-               {
+               [NEIGH_VAR_MCAST_PROBE] = {
                        .procname       = "mcast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_UCAST_PROBE] = {
                        .procname       = "ucast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_APP_PROBE] = {
                        .procname       = "app_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_RETRANS_TIME] = {
                        .procname       = "retrans_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_BASE_REACHABLE_TIME] = {
                        .procname       = "base_reachable_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_DELAY_PROBE_TIME] = {
                        .procname       = "delay_first_probe_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_STALETIME] = {
                        .procname       = "gc_stale_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_QUEUE_LEN] = {
                        .procname       = "unres_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
+                       .proc_handler   = proc_unres_qlen,
+               },
+               [NEIGH_VAR_QUEUE_LEN_BYTES] = {
+                       .procname       = "unres_qlen_bytes",
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_PROXY_QLEN] = {
                        .procname       = "proxy_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_ANYCAST_DELAY] = {
                        .procname       = "anycast_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_PROXY_DELAY] = {
                        .procname       = "proxy_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_LOCKTIME] = {
                        .procname       = "locktime",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_RETRANS_TIME_MS] = {
                        .procname       = "retrans_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_ms_jiffies,
                },
-               {
+               [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
                        .procname       = "base_reachable_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_ms_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_INTERVAL] = {
                        .procname       = "gc_interval",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_THRESH1] = {
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_GC_THRESH2] = {
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_GC_THRESH3] = {
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
@@ -2778,47 +2853,49 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
        if (!t)
                goto err;
 
-       t->neigh_vars[0].data  = &p->mcast_probes;
-       t->neigh_vars[1].data  = &p->ucast_probes;
-       t->neigh_vars[2].data  = &p->app_probes;
-       t->neigh_vars[3].data  = &p->retrans_time;
-       t->neigh_vars[4].data  = &p->base_reachable_time;
-       t->neigh_vars[5].data  = &p->delay_probe_time;
-       t->neigh_vars[6].data  = &p->gc_staletime;
-       t->neigh_vars[7].data  = &p->queue_len;
-       t->neigh_vars[8].data  = &p->proxy_qlen;
-       t->neigh_vars[9].data  = &p->anycast_delay;
-       t->neigh_vars[10].data = &p->proxy_delay;
-       t->neigh_vars[11].data = &p->locktime;
-       t->neigh_vars[12].data  = &p->retrans_time;
-       t->neigh_vars[13].data  = &p->base_reachable_time;
+       t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
+       t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
+       t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
+       t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
+       t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
+       t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
+       t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
+       t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
+       t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
+       t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
+       t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
+       t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
+       t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
+       t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
+       t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
 
        if (dev) {
                dev_name_source = dev->name;
                /* Terminate the table early */
-               memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+               memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
+                      sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
                dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
-               t->neigh_vars[14].data = (int *)(p + 1);
-               t->neigh_vars[15].data = (int *)(p + 1) + 1;
-               t->neigh_vars[16].data = (int *)(p + 1) + 2;
-               t->neigh_vars[17].data = (int *)(p + 1) + 3;
+               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
+               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
        }
 
 
        if (handler) {
                /* RetransTime */
-               t->neigh_vars[3].proc_handler = handler;
-               t->neigh_vars[3].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
                /* ReachableTime */
-               t->neigh_vars[4].proc_handler = handler;
-               t->neigh_vars[4].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
                /* RetransTime (in milliseconds)*/
-               t->neigh_vars[12].proc_handler = handler;
-               t->neigh_vars[12].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
                /* ReachableTime (in milliseconds) */
-               t->neigh_vars[13].proc_handler = handler;
-               t->neigh_vars[13].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
        }
 
        t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
index c71c434a4c053e440dc816682d944c521e05c50f..abf4393a77b3b1de706931815a72680bfbd78586 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/wireless.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 #include <net/wext.h>
 
 #include "net-sysfs.h"
@@ -606,9 +607,12 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);
 
-       if (old_map)
+       if (map)
+               jump_label_inc(&rps_needed);
+       if (old_map) {
                kfree_rcu(old_map, rcu);
-
+               jump_label_dec(&rps_needed);
+       }
        free_cpumask_var(mask);
        return len;
 }
@@ -618,15 +622,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           char *buf)
 {
        struct rps_dev_flow_table *flow_table;
-       unsigned int val = 0;
+       unsigned long val = 0;
 
        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
-               val = flow_table->mask + 1;
+               val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();
 
-       return sprintf(buf, "%u\n", val);
+       return sprintf(buf, "%lu\n", val);
 }
 
 static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -650,33 +654,46 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                     struct rx_queue_attribute *attr,
                                     const char *buf, size_t len)
 {
-       unsigned int count;
-       char *endp;
+       unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
+       int rc;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       count = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       rc = kstrtoul(buf, 0, &count);
+       if (rc < 0)
+               return rc;
 
        if (count) {
-               int i;
-
-               if (count > 1<<30) {
+               mask = count - 1;
+               /* mask = roundup_pow_of_two(count) - 1;
+                * without overflows...
+                */
+               while ((mask | (mask >> 1)) != mask)
+                       mask |= (mask >> 1);
+               /* On 64 bit arches, must check mask fits in table->mask (u32),
+                * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+                * doesn't overflow.
+                */
+#if BITS_PER_LONG > 32
+               if (mask > (unsigned long)(u32)mask)
+                       return -EINVAL;
+#else
+               if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
-               table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+               table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;
 
-               table->mask = count - 1;
-               for (i = 0; i < count; i++)
-                       table->flows[i].cpu = RPS_NO_CPU;
+               table->mask = mask;
+               for (count = 0; count <= mask; count++)
+                       table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;
 
@@ -780,7 +797,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 #endif
 }
 
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
 /*
  * netdev_queue sysfs structures and functions.
  */
@@ -826,6 +843,133 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .store = netdev_queue_attr_store,
 };
 
+static ssize_t show_trans_timeout(struct netdev_queue *queue,
+                                 struct netdev_queue_attribute *attribute,
+                                 char *buf)
+{
+       unsigned long trans_timeout;
+
+       spin_lock_irq(&queue->_xmit_lock);
+       trans_timeout = queue->trans_timeout;
+       spin_unlock_irq(&queue->_xmit_lock);
+
+       return sprintf(buf, "%lu", trans_timeout);
+}
+
+static struct netdev_queue_attribute queue_trans_timeout =
+       __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
+#ifdef CONFIG_BQL
+/*
+ * Byte queue limits sysfs structures and functions.
+ */
+static ssize_t bql_show(char *buf, unsigned int value)
+{
+       return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t bql_set(const char *buf, const size_t count,
+                      unsigned int *pvalue)
+{
+       unsigned int value;
+       int err;
+
+       if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
+               value = DQL_MAX_LIMIT;
+       else {
+               err = kstrtouint(buf, 10, &value);
+               if (err < 0)
+                       return err;
+               if (value > DQL_MAX_LIMIT)
+                       return -EINVAL;
+       }
+
+       *pvalue = value;
+
+       return count;
+}
+
+static ssize_t bql_show_hold_time(struct netdev_queue *queue,
+                                 struct netdev_queue_attribute *attr,
+                                 char *buf)
+{
+       struct dql *dql = &queue->dql;
+
+       return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
+}
+
+static ssize_t bql_set_hold_time(struct netdev_queue *queue,
+                                struct netdev_queue_attribute *attribute,
+                                const char *buf, size_t len)
+{
+       struct dql *dql = &queue->dql;
+       unsigned value;
+       int err;
+
+       err = kstrtouint(buf, 10, &value);
+       if (err < 0)
+               return err;
+
+       dql->slack_hold_time = msecs_to_jiffies(value);
+
+       return len;
+}
+
+static struct netdev_queue_attribute bql_hold_time_attribute =
+       __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
+           bql_set_hold_time);
+
+static ssize_t bql_show_inflight(struct netdev_queue *queue,
+                                struct netdev_queue_attribute *attr,
+                                char *buf)
+{
+       struct dql *dql = &queue->dql;
+
+       return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
+}
+
+static struct netdev_queue_attribute bql_inflight_attribute =
+       __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+
+#define BQL_ATTR(NAME, FIELD)                                          \
+static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,           \
+                                struct netdev_queue_attribute *attr,   \
+                                char *buf)                             \
+{                                                                      \
+       return bql_show(buf, queue->dql.FIELD);                         \
+}                                                                      \
+                                                                       \
+static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,            \
+                               struct netdev_queue_attribute *attr,    \
+                               const char *buf, size_t len)            \
+{                                                                      \
+       return bql_set(buf, len, &queue->dql.FIELD);                    \
+}                                                                      \
+                                                                       \
+static struct netdev_queue_attribute bql_ ## NAME ## _attribute =      \
+       __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
+           bql_set_ ## NAME);
+
+BQL_ATTR(limit, limit)
+BQL_ATTR(limit_max, max_limit)
+BQL_ATTR(limit_min, min_limit)
+
+static struct attribute *dql_attrs[] = {
+       &bql_limit_attribute.attr,
+       &bql_limit_max_attribute.attr,
+       &bql_limit_min_attribute.attr,
+       &bql_hold_time_attribute.attr,
+       &bql_inflight_attribute.attr,
+       NULL
+};
+
+static struct attribute_group dql_group = {
+       .name  = "byte_queue_limits",
+       .attrs  = dql_attrs,
+};
+#endif /* CONFIG_BQL */
+
+#ifdef CONFIG_XPS
 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
        struct net_device *dev = queue->dev;
@@ -890,6 +1034,52 @@ static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)            \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
+static void xps_queue_release(struct netdev_queue *queue)
+{
+       struct net_device *dev = queue->dev;
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       unsigned long index;
+       int i, pos, nonempty = 0;
+
+       index = get_netdev_queue_index(queue);
+
+       mutex_lock(&xps_map_mutex);
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       if (dev_maps) {
+               for_each_possible_cpu(i) {
+                       map = xmap_dereference(dev_maps->cpu_map[i]);
+                       if (!map)
+                               continue;
+
+                       for (pos = 0; pos < map->len; pos++)
+                               if (map->queues[pos] == index)
+                                       break;
+
+                       if (pos < map->len) {
+                               if (map->len > 1)
+                                       map->queues[pos] =
+                                           map->queues[--map->len];
+                               else {
+                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
+                                           NULL);
+                                       kfree_rcu(map, rcu);
+                                       map = NULL;
+                               }
+                       }
+                       if (map)
+                               nonempty = 1;
+               }
+
+               if (!nonempty) {
+                       RCU_INIT_POINTER(dev->xps_maps, NULL);
+                       kfree_rcu(dev_maps, rcu);
+               }
+       }
+       mutex_unlock(&xps_map_mutex);
+}
+
 static ssize_t store_xps_map(struct netdev_queue *queue,
                      struct netdev_queue_attribute *attribute,
                      const char *buf, size_t len)
@@ -901,7 +1091,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
        struct xps_map *map, *new_map;
        struct xps_dev_maps *dev_maps, *new_dev_maps;
        int nonempty = 0;
-       int numa_node = -2;
+       int numa_node_id = -2;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -944,10 +1134,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
                need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
 #ifdef CONFIG_NUMA
                if (need_set) {
-                       if (numa_node == -2)
-                               numa_node = cpu_to_node(cpu);
-                       else if (numa_node != cpu_to_node(cpu))
-                               numa_node = -1;
+                       if (numa_node_id == -2)
+                               numa_node_id = cpu_to_node(cpu);
+                       else if (numa_node_id != cpu_to_node(cpu))
+                               numa_node_id = -1;
                }
 #endif
                if (need_set && pos >= map_len) {
@@ -997,7 +1187,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
        if (dev_maps)
                kfree_rcu(dev_maps, rcu);
 
-       netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+       netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
                                            NUMA_NO_NODE);
 
        mutex_unlock(&xps_map_mutex);
@@ -1020,58 +1210,23 @@ error:
 
 static struct netdev_queue_attribute xps_cpus_attribute =
     __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+#endif /* CONFIG_XPS */
 
 static struct attribute *netdev_queue_default_attrs[] = {
+       &queue_trans_timeout.attr,
+#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
+#endif
        NULL
 };
 
 static void netdev_queue_release(struct kobject *kobj)
 {
        struct netdev_queue *queue = to_netdev_queue(kobj);
-       struct net_device *dev = queue->dev;
-       struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
-       unsigned long index;
-       int i, pos, nonempty = 0;
-
-       index = get_netdev_queue_index(queue);
-
-       mutex_lock(&xps_map_mutex);
-       dev_maps = xmap_dereference(dev->xps_maps);
-
-       if (dev_maps) {
-               for_each_possible_cpu(i) {
-                       map = xmap_dereference(dev_maps->cpu_map[i]);
-                       if (!map)
-                               continue;
 
-                       for (pos = 0; pos < map->len; pos++)
-                               if (map->queues[pos] == index)
-                                       break;
-
-                       if (pos < map->len) {
-                               if (map->len > 1)
-                                       map->queues[pos] =
-                                           map->queues[--map->len];
-                               else {
-                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
-                                           NULL);
-                                       kfree_rcu(map, rcu);
-                                       map = NULL;
-                               }
-                       }
-                       if (map)
-                               nonempty = 1;
-               }
-
-               if (!nonempty) {
-                       RCU_INIT_POINTER(dev->xps_maps, NULL);
-                       kfree_rcu(dev_maps, rcu);
-               }
-       }
-
-       mutex_unlock(&xps_map_mutex);
+#ifdef CONFIG_XPS
+       xps_queue_release(queue);
+#endif
 
        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
@@ -1092,22 +1247,29 @@ static int netdev_queue_add_kobject(struct net_device *net, int index)
        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
-       if (error) {
-               kobject_put(kobj);
-               return error;
-       }
+       if (error)
+               goto exit;
+
+#ifdef CONFIG_BQL
+       error = sysfs_create_group(kobj, &dql_group);
+       if (error)
+               goto exit;
+#endif
 
        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);
 
+       return 0;
+exit:
+       kobject_put(kobj);
        return error;
 }
-#endif /* CONFIG_XPS */
+#endif /* CONFIG_SYSFS */
 
 int
 netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 {
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
        int i;
        int error = 0;
 
@@ -1119,20 +1281,26 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
                }
        }
 
-       while (--i >= new_num)
-               kobject_put(&net->_tx[i].kobj);
+       while (--i >= new_num) {
+               struct netdev_queue *queue = net->_tx + i;
+
+#ifdef CONFIG_BQL
+               sysfs_remove_group(&queue->kobj, &dql_group);
+#endif
+               kobject_put(&queue->kobj);
+       }
 
        return error;
 #else
        return 0;
-#endif
+#endif /* CONFIG_SYSFS */
 }
 
 static int register_queue_kobjects(struct net_device *net)
 {
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
@@ -1173,7 +1341,7 @@ static void remove_queue_kobjects(struct net_device *net)
 
        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
 #endif
 }
index cf64c1ffa4cd95c3368de492916f0b95ded7ce64..0d38808a2305d06c642f7192ece302c9b2a971c2 100644 (file)
@@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work)
 
                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_frozen_or_stopped(txq) ||
+               if (netif_xmit_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
@@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
-                               if (!netif_tx_queue_stopped(txq)) {
+                               if (!netif_xmit_stopped(txq)) {
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
@@ -422,6 +422,7 @@ static void arp_reply(struct sk_buff *skb)
        struct sk_buff *send_skb;
        struct netpoll *np, *tmp;
        unsigned long flags;
+       int hlen, tlen;
        int hits = 0;
 
        if (list_empty(&npinfo->rx_np))
@@ -479,8 +480,9 @@ static void arp_reply(struct sk_buff *skb)
                if (tip != np->local_ip)
                        continue;
 
-               send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
-                                   LL_RESERVED_SPACE(np->dev));
+               hlen = LL_RESERVED_SPACE(np->dev);
+               tlen = np->dev->needed_tailroom;
+               send_skb = find_skb(np, size + hlen + tlen, hlen);
                if (!send_skb)
                        continue;
 
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
new file mode 100644 (file)
index 0000000..3a9fd48
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * net/core/netprio_cgroup.c   Priority Control Group
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * Authors:    Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
+#include <linux/atomic.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/netprio_cgroup.h>
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+                                              struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_prio_subsys = {
+       .name           = "net_prio",
+       .create         = cgrp_create,
+       .destroy        = cgrp_destroy,
+       .populate       = cgrp_populate,
+#ifdef CONFIG_NETPRIO_CGROUP
+       .subsys_id      = net_prio_subsys_id,
+#endif
+       .module         = THIS_MODULE
+};
+
+#define PRIOIDX_SZ 128
+
+static unsigned long prioidx_map[PRIOIDX_SZ];
+static DEFINE_SPINLOCK(prioidx_map_lock);
+static atomic_t max_prioidx = ATOMIC_INIT(0);
+
+static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
+{
+       return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
+                           struct cgroup_netprio_state, css);
+}
+
+static int get_prioidx(u32 *prio)
+{
+       unsigned long flags;
+       u32 prioidx;
+
+       spin_lock_irqsave(&prioidx_map_lock, flags);
+       prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+       set_bit(prioidx, prioidx_map);
+       spin_unlock_irqrestore(&prioidx_map_lock, flags);
+       if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
+               return -ENOSPC;
+
+       atomic_set(&max_prioidx, prioidx);
+       *prio = prioidx;
+       return 0;
+}
+
+static void put_prioidx(u32 idx)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&prioidx_map_lock, flags);
+       clear_bit(idx, prioidx_map);
+       spin_unlock_irqrestore(&prioidx_map_lock, flags);
+}
+
+static void extend_netdev_table(struct net_device *dev, u32 new_len)
+{
+       size_t new_size = sizeof(struct netprio_map) +
+                          ((sizeof(u32) * new_len));
+       struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
+       struct netprio_map *old_priomap;
+       int i;
+
+       old_priomap  = rtnl_dereference(dev->priomap);
+
+       if (!new_priomap) {
+               printk(KERN_WARNING "Unable to alloc new priomap!\n");
+               return;
+       }
+
+       for (i = 0;
+            old_priomap && (i < old_priomap->priomap_len);
+            i++)
+               new_priomap->priomap[i] = old_priomap->priomap[i];
+
+       new_priomap->priomap_len = new_len;
+
+       rcu_assign_pointer(dev->priomap, new_priomap);
+       if (old_priomap)
+               kfree_rcu(old_priomap, rcu);
+}
+
+static void update_netdev_tables(void)
+{
+       struct net_device *dev;
+       u32 max_len = atomic_read(&max_prioidx);
+       struct netprio_map *map;
+
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               map = rtnl_dereference(dev->priomap);
+               if ((!map) ||
+                   (map->priomap_len < max_len))
+                       extend_netdev_table(dev, max_len);
+       }
+       rtnl_unlock();
+}
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+                                                struct cgroup *cgrp)
+{
+       struct cgroup_netprio_state *cs;
+       int ret;
+
+       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+       if (!cs)
+               return ERR_PTR(-ENOMEM);
+
+       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
+               kfree(cs);
+               return ERR_PTR(-EINVAL);
+       }
+
+       ret = get_prioidx(&cs->prioidx);
+       if (ret != 0) {
+               printk(KERN_WARNING "No space in priority index array\n");
+               kfree(cs);
+               return ERR_PTR(ret);
+       }
+
+       return &cs->css;
+}
+
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+       struct cgroup_netprio_state *cs;
+       struct net_device *dev;
+       struct netprio_map *map;
+
+       cs = cgrp_netprio_state(cgrp);
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               map = rtnl_dereference(dev->priomap);
+               if (map)
+                       map->priomap[cs->prioidx] = 0;
+       }
+       rtnl_unlock();
+       put_prioidx(cs->prioidx);
+       kfree(cs);
+}
+
+static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+{
+       return (u64)cgrp_netprio_state(cgrp)->prioidx;
+}
+
+static int read_priomap(struct cgroup *cont, struct cftype *cft,
+                       struct cgroup_map_cb *cb)
+{
+       struct net_device *dev;
+       u32 prioidx = cgrp_netprio_state(cont)->prioidx;
+       u32 priority;
+       struct netprio_map *map;
+
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, dev) {
+               map = rcu_dereference(dev->priomap);
+               priority = map ? map->priomap[prioidx] : 0;
+               cb->fill(cb, dev->name, priority);
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
+static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+                        const char *buffer)
+{
+       char *devname = kstrdup(buffer, GFP_KERNEL);
+       int ret = -EINVAL;
+       u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
+       unsigned long priority;
+       char *priostr;
+       struct net_device *dev;
+       struct netprio_map *map;
+
+       if (!devname)
+               return -ENOMEM;
+
+       /*
+        * Minimally sized valid priomap string
+        */
+       if (strlen(devname) < 3)
+               goto out_free_devname;
+
+       priostr = strstr(devname, " ");
+       if (!priostr)
+               goto out_free_devname;
+
+       /*
+        * Separate the devname from the associated priority
+        * and advance the priostr pointer to the priority value
+        */
+       *priostr = '\0';
+       priostr++;
+
+       /*
+        * If the priostr points to NULL, we're at the end of the passed
+        * in string, and it's not a valid write
+        */
+       if (*priostr == '\0')
+               goto out_free_devname;
+
+       ret = kstrtoul(priostr, 10, &priority);
+       if (ret < 0)
+               goto out_free_devname;
+
+       ret = -ENODEV;
+
+       dev = dev_get_by_name(&init_net, devname);
+       if (!dev)
+               goto out_free_devname;
+
+       update_netdev_tables();
+       ret = 0;
+       rcu_read_lock();
+       map = rcu_dereference(dev->priomap);
+       if (map)
+               map->priomap[prioidx] = priority;
+       rcu_read_unlock();
+       dev_put(dev);
+
+out_free_devname:
+       kfree(devname);
+       return ret;
+}
+
+static struct cftype ss_files[] = {
+       {
+               .name = "prioidx",
+               .read_u64 = read_prioidx,
+       },
+       {
+               .name = "ifpriomap",
+               .read_map = read_priomap,
+               .write_string = write_priomap,
+       },
+};
+
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+       return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
+}
+
+static int netprio_device_event(struct notifier_block *unused,
+                               unsigned long event, void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct netprio_map *old;
+       u32 max_len = atomic_read(&max_prioidx);
+
+       /*
+        * Note this is called with rtnl_lock held so we have update side
+        * protection on our rcu assignments
+        */
+
+       switch (event) {
+
+       case NETDEV_REGISTER:
+               if (max_len)
+                       extend_netdev_table(dev, max_len);
+               break;
+       case NETDEV_UNREGISTER:
+               old = rtnl_dereference(dev->priomap);
+               RCU_INIT_POINTER(dev->priomap, NULL);
+               if (old)
+                       kfree_rcu(old, rcu);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block netprio_device_notifier = {
+       .notifier_call = netprio_device_event
+};
+
+static int __init init_cgroup_netprio(void)
+{
+       int ret;
+
+       ret = cgroup_load_subsys(&net_prio_subsys);
+       if (ret)
+               goto out;
+#ifndef CONFIG_NETPRIO_CGROUP
+       smp_wmb();
+       net_prio_subsys_id = net_prio_subsys.subsys_id;
+#endif
+
+       register_netdevice_notifier(&netprio_device_notifier);
+
+out:
+       return ret;
+}
+
+static void __exit exit_cgroup_netprio(void)
+{
+       struct netprio_map *old;
+       struct net_device *dev;
+
+       unregister_netdevice_notifier(&netprio_device_notifier);
+
+       cgroup_unload_subsys(&net_prio_subsys);
+
+#ifndef CONFIG_NETPRIO_CGROUP
+       net_prio_subsys_id = -1;
+       synchronize_rcu();
+#endif
+
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               old = rtnl_dereference(dev->priomap);
+               RCU_INIT_POINTER(dev->priomap, NULL);
+               if (old)
+                       kfree_rcu(old, rcu);
+       }
+       rtnl_unlock();
+}
+
+module_init(init_cgroup_netprio);
+module_exit(exit_cgroup_netprio);
+MODULE_LICENSE("GPL v2");
index 0001c243b35cb4cc2aadebd72fd00679adc6e002..449fe0f068f8e91c8ae448cbf821cca9ad69a2b2 100644 (file)
@@ -1304,7 +1304,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
+               pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
 
                if (debug)
                        printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
@@ -1327,8 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
-                              &pkt_dev->min_in6_daddr);
+               pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
                if (debug)
                        printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
 
@@ -1371,7 +1370,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
+               pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
 
                if (debug)
                        printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
@@ -2079,9 +2078,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
                                     ifp = ifp->if_next) {
                                        if (ifp->scope == IFA_LINK &&
                                            !(ifp->flags & IFA_F_TENTATIVE)) {
-                                               ipv6_addr_copy(&pkt_dev->
-                                                              cur_in6_saddr,
-                                                              &ifp->addr);
+                                               pkt_dev->cur_in6_saddr = ifp->addr;
                                                err = 0;
                                                break;
                                        }
@@ -2958,8 +2955,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        iph->payload_len = htons(sizeof(struct udphdr) + datalen);
        iph->nexthdr = IPPROTO_UDP;
 
-       ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
-       ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
+       iph->daddr = pkt_dev->cur_in6_daddr;
+       iph->saddr = pkt_dev->cur_in6_saddr;
 
        skb->mac_header = (skb->network_header - ETH_HLEN -
                           pkt_dev->pkt_overhead);
@@ -3345,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
        __netif_tx_lock_bh(txq);
 
-       if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+       if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
                ret = NETDEV_TX_BUSY;
                pkt_dev->last_ok = 0;
                goto unlock;
index 182236b2510aeb16e6e0e2026264d683edf93e7e..9b570a6a33c5d8c52d777e160742dc31ec350c16 100644 (file)
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
index 9083e82bdae506d4a0cedc1d8eb385870d62b0dd..dbf2ddafd52d34952161103c2586931c845926cf 100644 (file)
@@ -273,6 +273,17 @@ EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
 static LIST_HEAD(link_ops);
 
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+       const struct rtnl_link_ops *ops;
+
+       list_for_each_entry(ops, &link_ops, list) {
+               if (!strcmp(ops->kind, kind))
+                       return ops;
+       }
+       return NULL;
+}
+
 /**
  * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
  * @ops: struct rtnl_link_ops * to register
@@ -285,6 +296,9 @@ static LIST_HEAD(link_ops);
  */
 int __rtnl_link_register(struct rtnl_link_ops *ops)
 {
+       if (rtnl_link_ops_get(ops->kind))
+               return -EEXIST;
+
        if (!ops->dellink)
                ops->dellink = unregister_netdevice_queue;
 
@@ -351,17 +365,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
-{
-       const struct rtnl_link_ops *ops;
-
-       list_for_each_entry(ops, &link_ops, list) {
-               if (!strcmp(ops->kind, kind))
-                       return ops;
-       }
-       return NULL;
-}
-
 static size_t rtnl_link_get_size(const struct net_device *dev)
 {
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
index 025233de25f969cb67e075d6ea5c4a9bba746bd9..6fd44606fdd130a12712ac3f67ea6f92befab574 100644 (file)
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
        /*
@@ -33,8 +34,9 @@ static u32 seq_scale(u32 seq)
         */
        return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
                                   __be16 sport, __be16 dport)
 {
@@ -132,7 +134,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
 #endif
 
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+#if IS_ENABLED(CONFIG_IP_DCCP)
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport)
 {
@@ -154,7 +156,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 }
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                  __be16 sport, __be16 dport)
 {
index 06438f926022b1ce3ee38651d991b014dd1ea019..da0c97f2fab446b551e79abd65bb485316ae53fe 100644 (file)
@@ -244,6 +244,55 @@ nodata:
 }
 EXPORT_SYMBOL(__alloc_skb);
 
+/**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+ *  Before IO, driver allocates only data buffer where NIC put incoming frame
+ *  Driver should add room at head (NET_SKB_PAD) and
+ *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
+ *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
+ *  before giving packet to stack.
+ *  RX rings only contains data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+       struct skb_shared_info *shinfo;
+       struct sk_buff *skb;
+       unsigned int size;
+
+       skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       memset(skb, 0, offsetof(struct sk_buff, tail));
+       skb->truesize = SKB_TRUESIZE(size);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb_reset_tail_pointer(skb);
+       skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->mac_header = ~0U;
+#endif
+
+       /* make sure we initialize shinfo sequentially */
+       shinfo = skb_shinfo(skb);
+       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+       atomic_set(&shinfo->dataref, 1);
+       kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+       return skb;
+}
+EXPORT_SYMBOL(build_skb);
+
 /**
  *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *     @dev: network device to receive on
@@ -403,7 +452,7 @@ static void skb_release_head_state(struct sk_buff *skb)
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
 #endif
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
@@ -553,15 +602,14 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority           = old->priority;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
        new->ipvs_property      = old->ipvs_property;
 #endif
        new->protocol           = old->protocol;
        new->mark               = old->mark;
        new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        new->nf_trace           = old->nf_trace;
 #endif
 #ifdef CONFIG_NET_SCHED
@@ -791,8 +839,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 EXPORT_SYMBOL(skb_copy);
 
 /**
- *     pskb_copy       -       create copy of an sk_buff with private head.
+ *     __pskb_copy     -       create copy of an sk_buff with private head.
  *     @skb: buffer to copy
+ *     @headroom: headroom of new skb
  *     @gfp_mask: allocation priority
  *
  *     Make a copy of both an &sk_buff and part of its data, located
@@ -803,16 +852,16 @@ EXPORT_SYMBOL(skb_copy);
  *     The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 {
-       unsigned int size = skb_end_pointer(skb) - skb->head;
+       unsigned int size = skb_headlen(skb) + headroom;
        struct sk_buff *n = alloc_skb(size, gfp_mask);
 
        if (!n)
                goto out;
 
        /* Set the data pointer */
-       skb_reserve(n, skb_headroom(skb));
+       skb_reserve(n, headroom);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
@@ -848,7 +897,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
        return n;
 }
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
 
 /**
  *     pskb_expand_head - reallocate header of &sk_buff
@@ -2230,7 +2279,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.
@@ -2621,7 +2670,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     a pointer to the first in a list of new skbs for the segments.
  *     In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
index cbdf51c0d5acbb7334ec4e6c3c621ab882d937d8..002939cfc069d2fe5eabe9b8ec4cefc1e20b2112 100644 (file)
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
+#include <linux/jump_label.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
 #include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
 
 #include <linux/filter.h>
 
 #include <net/tcp.h>
 #endif
 
+static DEFINE_MUTEX(proto_list_mutex);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+       int ret = 0;
+
+       mutex_lock(&proto_list_mutex);
+       list_for_each_entry(proto, &proto_list, node) {
+               if (proto->init_cgroup) {
+                       ret = proto->init_cgroup(cgrp, ss);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       mutex_unlock(&proto_list_mutex);
+       return ret;
+out:
+       list_for_each_entry_continue_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       mutex_unlock(&proto_list_mutex);
+       return ret;
+}
+
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+
+       mutex_lock(&proto_list_mutex);
+       list_for_each_entry_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       mutex_unlock(&proto_list_mutex);
+}
+#endif
+
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
+struct jump_label_key memcg_socket_limit_enabled;
+EXPORT_SYMBOL(memcg_socket_limit_enabled);
+
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -221,10 +266,16 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
-#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+#if defined(CONFIG_CGROUPS)
+#if !defined(CONFIG_NET_CLS_CGROUP)
 int net_cls_subsys_id = -1;
 EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 #endif
+#if !defined(CONFIG_NETPRIO_CGROUP)
+int net_prio_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_prio_subsys_id);
+#endif
+#endif
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
@@ -269,14 +320,14 @@ static void sock_warn_obsolete_bsdism(const char *name)
        }
 }
 
-static void sock_disable_timestamp(struct sock *sk, int flag)
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
+static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
-       if (sock_flag(sk, flag)) {
-               sock_reset_flag(sk, flag);
-               if (!sock_flag(sk, SOCK_TIMESTAMP) &&
-                   !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
+       if (sk->sk_flags & flags) {
+               sk->sk_flags &= ~flags;
+               if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
                        net_disable_timestamp();
-               }
        }
 }
 
@@ -288,11 +339,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
@@ -682,7 +729,7 @@ set_rcvbuf:
                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
                else
                        sock_disable_timestamp(sk,
-                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
+                                              (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
                                  val & SOF_TIMESTAMPING_SOFTWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
@@ -1120,6 +1167,18 @@ void sock_update_classid(struct sock *sk)
                sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
+
+void sock_update_netprioidx(struct sock *sk)
+{
+       struct cgroup_netprio_state *state;
+       if (in_interrupt())
+               return;
+       rcu_read_lock();
+       state = task_netprio_state(current);
+       sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
 
 /**
@@ -1147,6 +1206,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                atomic_set(&sk->sk_wmem_alloc, 1);
 
                sock_update_classid(sk);
+               sock_update_netprioidx(sk);
        }
 
        return sk;
@@ -1167,8 +1227,7 @@ static void __sk_free(struct sock *sk)
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }
 
-       sock_disable_timestamp(sk, SOCK_TIMESTAMP);
-       sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
+       sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
        if (atomic_read(&sk->sk_omem_alloc))
                printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
@@ -1213,7 +1272,14 @@ void sk_release_kernel(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_release_kernel);
 
-struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+/**
+ *     sk_clone_lock - clone a socket, and lock its clone
+ *     @sk: the socket to clone
+ *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *     Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
        struct sock *newsk;
 
@@ -1297,16 +1363,15 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                newsk->sk_wq = NULL;
 
                if (newsk->sk_prot->sockets_allocated)
-                       percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+                       sk_sockets_allocated_inc(newsk);
 
-               if (sock_flag(newsk, SOCK_TIMESTAMP) ||
-                   sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+               if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
 out:
        return newsk;
 }
-EXPORT_SYMBOL_GPL(sk_clone);
+EXPORT_SYMBOL_GPL(sk_clone_lock);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
@@ -1686,30 +1751,34 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
        struct proto *prot = sk->sk_prot;
        int amt = sk_mem_pages(size);
        long allocated;
+       int parent_status = UNDER_LIMIT;
 
        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-       allocated = atomic_long_add_return(amt, prot->memory_allocated);
+
+       allocated = sk_memory_allocated_add(sk, amt, &parent_status);
 
        /* Under limit. */
-       if (allocated <= prot->sysctl_mem[0]) {
-               if (prot->memory_pressure && *prot->memory_pressure)
-                       *prot->memory_pressure = 0;
+       if (parent_status == UNDER_LIMIT &&
+                       allocated <= sk_prot_mem_limits(sk, 0)) {
+               sk_leave_memory_pressure(sk);
                return 1;
        }
 
-       /* Under pressure. */
-       if (allocated > prot->sysctl_mem[1])
-               if (prot->enter_memory_pressure)
-                       prot->enter_memory_pressure(sk);
+       /* Under pressure. (we or our parents) */
+       if ((parent_status > SOFT_LIMIT) ||
+                       allocated > sk_prot_mem_limits(sk, 1))
+               sk_enter_memory_pressure(sk);
 
-       /* Over hard limit. */
-       if (allocated > prot->sysctl_mem[2])
+       /* Over hard limit (we or our parents) */
+       if ((parent_status == OVER_LIMIT) ||
+                       (allocated > sk_prot_mem_limits(sk, 2)))
                goto suppress_allocation;
 
        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;
+
        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
@@ -1719,13 +1788,13 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
                                return 1;
        }
 
-       if (prot->memory_pressure) {
+       if (sk_has_memory_pressure(sk)) {
                int alloc;
 
-               if (!*prot->memory_pressure)
+               if (!sk_under_memory_pressure(sk))
                        return 1;
-               alloc = percpu_counter_read_positive(prot->sockets_allocated);
-               if (prot->sysctl_mem[2] > alloc *
+               alloc = sk_sockets_allocated_read_positive(sk);
+               if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
@@ -1748,7 +1817,9 @@ suppress_allocation:
 
        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-       atomic_long_sub(amt, prot->memory_allocated);
+
+       sk_memory_allocated_sub(sk, amt, parent_status);
+
        return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1759,15 +1830,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
  */
 void __sk_mem_reclaim(struct sock *sk)
 {
-       struct proto *prot = sk->sk_prot;
-
-       atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
-                  prot->memory_allocated);
+       sk_memory_allocated_sub(sk,
+                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
-       if (prot->memory_pressure && *prot->memory_pressure &&
-           (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
-               *prot->memory_pressure = 0;
+       if (sk_under_memory_pressure(sk) &&
+           (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+               sk_leave_memory_pressure(sk);
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
@@ -2138,16 +2207,15 @@ EXPORT_SYMBOL(sock_get_timestampns);
 void sock_enable_timestamp(struct sock *sk, int flag)
 {
        if (!sock_flag(sk, flag)) {
+               unsigned long previous_flags = sk->sk_flags;
+
                sock_set_flag(sk, flag);
                /*
                 * we just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
-               if (!sock_flag(sk,
-                               flag == SOCK_TIMESTAMP ?
-                               SOCK_TIMESTAMPING_RX_SOFTWARE :
-                               SOCK_TIMESTAMP))
+               if (!(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
 }
@@ -2259,9 +2327,6 @@ void sk_common_release(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_common_release);
 
-static DEFINE_RWLOCK(proto_list_lock);
-static LIST_HEAD(proto_list);
-
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR 64      /* should be enough for the first time */
 struct prot_inuse {
@@ -2410,10 +2475,10 @@ int proto_register(struct proto *prot, int alloc_slab)
                }
        }
 
-       write_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
-       write_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
        return 0;
 
 out_free_timewait_sock_slab_name:
@@ -2436,10 +2501,10 @@ EXPORT_SYMBOL(proto_register);
 
 void proto_unregister(struct proto *prot)
 {
-       write_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
-       write_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
 
        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
@@ -2462,9 +2527,9 @@ EXPORT_SYMBOL(proto_unregister);
 
 #ifdef CONFIG_PROC_FS
 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(proto_list_lock)
+       __acquires(proto_list_mutex)
 {
-       read_lock(&proto_list_lock);
+       mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
 }
 
@@ -2474,25 +2539,36 @@ static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void proto_seq_stop(struct seq_file *seq, void *v)
-       __releases(proto_list_lock)
+       __releases(proto_list_mutex)
 {
-       read_unlock(&proto_list_lock);
+       mutex_unlock(&proto_list_mutex);
 }
 
 static char proto_method_implemented(const void *method)
 {
        return method == NULL ? 'n' : 'y';
 }
+static long sock_prot_memory_allocated(struct proto *proto)
+{
+       return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+}
+
+static char *sock_prot_memory_pressure(struct proto *proto)
+{
+       return proto->memory_pressure != NULL ?
+       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
+}
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
+
        seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
-                  proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
-                  proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+                  sock_prot_memory_allocated(proto),
+                  sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
new file mode 100644 (file)
index 0000000..b9868e1
--- /dev/null
@@ -0,0 +1,192 @@
+#include <linux/mutex.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <net/sock.h>
+
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+static DEFINE_MUTEX(sock_diag_table_mutex);
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie)
+{
+       if ((cookie[0] != INET_DIAG_NOCOOKIE ||
+            cookie[1] != INET_DIAG_NOCOOKIE) &&
+           ((u32)(unsigned long)sk != cookie[0] ||
+            (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
+               return -ESTALE;
+       else
+               return 0;
+}
+EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
+
+void sock_diag_save_cookie(void *sk, __u32 *cookie)
+{
+       cookie[0] = (u32)(unsigned long)sk;
+       cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+}
+EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+{
+       __u32 *mem;
+
+       mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
+
+       mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+       mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+       mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+       mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+       mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = fn;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
+
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
+
+int sock_diag_register(struct sock_diag_handler *hndl)
+{
+       int err = 0;
+
+       if (hndl->family >= AF_MAX)
+               return -EINVAL;
+
+       mutex_lock(&sock_diag_table_mutex);
+       if (sock_diag_handlers[hndl->family])
+               err = -EBUSY;
+       else
+               sock_diag_handlers[hndl->family] = hndl;
+       mutex_unlock(&sock_diag_table_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sock_diag_register);
+
+void sock_diag_unregister(struct sock_diag_handler *hnld)
+{
+       int family = hnld->family;
+
+       if (family >= AF_MAX)
+               return;
+
+       mutex_lock(&sock_diag_table_mutex);
+       BUG_ON(sock_diag_handlers[family] != hnld);
+       sock_diag_handlers[family] = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister);
+
+static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+       if (sock_diag_handlers[family] == NULL)
+               request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                               NETLINK_SOCK_DIAG, family);
+
+       mutex_lock(&sock_diag_table_mutex);
+       return sock_diag_handlers[family];
+}
+
+static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+{
+       mutex_unlock(&sock_diag_table_mutex);
+}
+
+static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int err;
+       struct sock_diag_req *req = NLMSG_DATA(nlh);
+       struct sock_diag_handler *hndl;
+
+       if (nlmsg_len(nlh) < sizeof(*req))
+               return -EINVAL;
+
+       hndl = sock_diag_lock_handler(req->sdiag_family);
+       if (hndl == NULL)
+               err = -ENOENT;
+       else
+               err = hndl->dump(skb, nlh);
+       sock_diag_unlock_handler(hndl);
+
+       return err;
+}
+
+static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int ret;
+
+       switch (nlh->nlmsg_type) {
+       case TCPDIAG_GETSOCK:
+       case DCCPDIAG_GETSOCK:
+               if (inet_rcv_compat == NULL)
+                       request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                                       NETLINK_SOCK_DIAG, AF_INET);
+
+               mutex_lock(&sock_diag_table_mutex);
+               if (inet_rcv_compat != NULL)
+                       ret = inet_rcv_compat(skb, nlh);
+               else
+                       ret = -EOPNOTSUPP;
+               mutex_unlock(&sock_diag_table_mutex);
+
+               return ret;
+       case SOCK_DIAG_BY_FAMILY:
+               return __sock_diag_rcv_msg(skb, nlh);
+       default:
+               return -EINVAL;
+       }
+}
+
+static DEFINE_MUTEX(sock_diag_mutex);
+
+static void sock_diag_rcv(struct sk_buff *skb)
+{
+       mutex_lock(&sock_diag_mutex);
+       netlink_rcv_skb(skb, &sock_diag_rcv_msg);
+       mutex_unlock(&sock_diag_mutex);
+}
+
+struct sock *sock_diag_nlsk;
+EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+
+static int __init sock_diag_init(void)
+{
+       sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
+                                       sock_diag_rcv, NULL, THIS_MODULE);
+       return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __exit sock_diag_exit(void)
+{
+       netlink_kernel_release(sock_diag_nlsk);
+}
+
+module_init(sock_diag_init);
+module_exit(sock_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
index 77a65f031488b3f222e13d1f071a2d3f44218c70..d05559d4d9cd4bbf5d97bd1ce1f058ef016a8e3a 100644 (file)
@@ -68,8 +68,13 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
-                       synchronize_rcu();
-                       vfree(orig_sock_table);
+                       if (sock_table)
+                               jump_label_inc(&rps_needed);
+                       if (orig_sock_table) {
+                               jump_label_dec(&rps_needed);
+                               synchronize_rcu();
+                               vfree(orig_sock_table);
+                       }
                }
        }
 
index 67164bb6ae4def6423e404286990bcdb99867388..f053198e730c48c7ea8114706c3d4904228f41fb 100644 (file)
@@ -29,7 +29,7 @@
 
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-static int ccid2_debug;
+static bool ccid2_debug;
 #define ccid2_pr_debug(format, a...)   DCCP_PR_DEBUG(ccid2_debug, format, ##a)
 #else
 #define ccid2_pr_debug(format, a...)
@@ -174,7 +174,7 @@ out:
 /*
  *     Congestion window validation (RFC 2861).
  */
-static int ccid2_do_cwv = 1;
+static bool ccid2_do_cwv = true;
 module_param(ccid2_do_cwv, bool, 0644);
 MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
 
index 3d604e1349c0b0e5d94ff755a90c7dd395b92a24..560627307200d5d63714c67d5a59b494b687f513 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/unaligned.h>
 
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
-static int ccid3_debug;
+static bool ccid3_debug;
 #define ccid3_pr_debug(format, a...)   DCCP_PR_DEBUG(ccid3_debug, format, ##a)
 #else
 #define ccid3_pr_debug(format, a...)
index 1f94b7e01d392929004216b1541d9aa5d88c065c..62b5828acde0906b71fc39955c9f2b36582d395a 100644 (file)
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-int tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
index f8ee3f5497702c300c43c6529358c4dd5ee137f3..ed698c42a5fbef17b34a9a4daf6fc34464d914f5 100644 (file)
@@ -21,7 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-extern int tfrc_debug;
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)    DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
index 583490aaf56f4914f8922fcf025730678ba1030b..29d6bb629a6c7b1479dcffbf05404fe5a98e425e 100644 (file)
@@ -39,7 +39,7 @@
                                                  "%s: " fmt, __func__, ##a)
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-extern int dccp_debug;
+extern bool dccp_debug;
 #define dccp_pr_debug(format, a...)      DCCP_PR_DEBUG(dccp_debug, format, ##a)
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)            dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
@@ -357,7 +357,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
 struct dccp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index b21f261da75ee2572e40ce44f44746c3ee899e7a..8f16257533779dcda5e5d18c5413d5d35c2d9162 100644 (file)
@@ -48,11 +48,23 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                dccp_get_info(sk, _info);
 }
 
+static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+}
+
+static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler dccp_diag_handler = {
-       .idiag_hashinfo  = &dccp_hashinfo,
+       .dump            = dccp_diag_dump,
+       .dump_one        = dccp_diag_dump_one,
        .idiag_get_info  = dccp_diag_get_info,
-       .idiag_type      = DCCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_DCCP,
 };
 
 static int __init dccp_diag_init(void)
@@ -71,4 +83,4 @@ module_exit(dccp_diag_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
 MODULE_DESCRIPTION("DCCP inet_diag handler");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33 /* AF_INET - IPPROTO_DCCP */);
index 23cea0ee31015d4988ce0456be919f1a24326b79..78a2ad70e1b036db59ee35b49d09657cb03b9aa6 100644 (file)
@@ -490,8 +490,8 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
        new->feat_num        = feat;
        new->is_local        = local;
        new->state           = FEAT_INITIALISING;
-       new->needs_confirm   = 0;
-       new->empty_confirm   = 0;
+       new->needs_confirm   = false;
+       new->empty_confirm   = false;
        new->val             = *fval;
        new->needs_mandatory = mandatory;
 
@@ -517,12 +517,12 @@ static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
        new->feat_num        = feat;
        new->is_local        = local;
        new->state           = FEAT_STABLE;     /* transition in 6.6.2 */
-       new->needs_confirm   = 1;
+       new->needs_confirm   = true;
        new->empty_confirm   = (fval == NULL);
        new->val.nn          = 0;               /* zeroes the whole structure */
        if (!new->empty_confirm)
                new->val     = *fval;
-       new->needs_mandatory = 0;
+       new->needs_mandatory = false;
 
        return 0;
 }
@@ -1155,7 +1155,7 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
        }
 
        if (dccp_feat_reconcile(&entry->val, val, len, server, true)) {
-               entry->empty_confirm = 0;
+               entry->empty_confirm = false;
        } else if (is_mandatory) {
                return DCCP_RESET_CODE_MANDATORY_ERROR;
        } else if (entry->state == FEAT_INITIALISING) {
@@ -1171,10 +1171,10 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
                defval = dccp_feat_default_value(feat);
                if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true))
                        return DCCP_RESET_CODE_OPTION_ERROR;
-               entry->empty_confirm = 1;
+               entry->empty_confirm = true;
        }
-       entry->needs_confirm   = 1;
-       entry->needs_mandatory = 0;
+       entry->needs_confirm   = true;
+       entry->needs_mandatory = false;
        entry->state           = FEAT_STABLE;
        return 0;
 
index 90a919afbed79ee5998f510badfaaa199a441086..1c67fe8ff90d27f32779d4fc674dbc25083393ed 100644 (file)
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
@@ -473,10 +474,11 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
                                           struct sk_buff *skb)
 {
        struct rtable *rt;
+       const struct iphdr *iph = ip_hdr(skb);
        struct flowi4 fl4 = {
                .flowi4_oif = skb_rtable(skb)->rt_iif,
-               .daddr = ip_hdr(skb)->saddr,
-               .saddr = ip_hdr(skb)->daddr,
+               .daddr = iph->saddr,
+               .saddr = iph->daddr,
                .flowi4_tos = RT_CONN_FLAGS(sk),
                .flowi4_proto = sk->sk_protocol,
                .fl4_sport = dccp_hdr(skb)->dccph_dport,
index 17ee85ce148dee3ac5e12e572a61f38dcfa8d381..ce903f747e64ba27675726aa3d1ba9db0abd46ad 100644 (file)
@@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                         */
                        memset(&fl6, 0, sizeof(fl6));
                        fl6.flowi6_proto = IPPROTO_DCCP;
-                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.daddr = np->daddr;
+                       fl6.saddr = np->saddr;
                        fl6.flowi6_oif = sk->sk_bound_dev_if;
                        fl6.fl6_dport = inet->inet_dport;
                        fl6.fl6_sport = inet->inet_sport;
@@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
-       ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+       fl6.daddr = ireq6->rmt_addr;
+       fl6.saddr = ireq6->loc_addr;
        fl6.flowlabel = 0;
        fl6.flowi6_oif = ireq6->iif;
        fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
                                                            &rxip6h->daddr);
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
-       ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
+       fl6.daddr = rxip6h->saddr;
+       fl6.saddr = rxip6h->daddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
        fl6.flowi6_oif = inet6_iif(rxskb);
@@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq6 = inet6_rsk(req);
-       ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
 
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+               newnp->rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_DCCP;
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                final_p = fl6_update_dst(&fl6, opt, &final);
-               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.saddr = ireq6->loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
                fl6.fl6_sport = inet_rsk(req)->loc_port;
@@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
-       ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
-       ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
+       newnp->daddr = ireq6->rmt_addr;
+       newnp->saddr = ireq6->loc_addr;
+       newnp->rcv_saddr = ireq6->loc_addr;
        newsk->sk_bound_dev_if = ireq6->iif;
 
        /* Now IPv6 options...
@@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                        fl6_sock_release(flowlabel);
                }
        }
@@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        return -EINVAL;
        }
 
-       ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+       np->daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                saddr = &np->rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
@@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               ipv6_addr_copy(&np->rcv_saddr, saddr);
+               np->rcv_saddr = *saddr;
        }
 
        /* set the source address */
-       ipv6_addr_copy(&np->saddr, saddr);
+       np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        __ip6_dst_store(sk, dst, NULL, NULL);
index d7041a0963af9588142133187c28e7c3a9abc215..5a7f90bbffacda3c646a8e4d04f7b2c9b6652772 100644 (file)
@@ -53,15 +53,15 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
 
                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
-                       ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw6->tw_v6_daddr = np->daddr;
+                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
                        tw->tw_ipv6only = np->ipv6only;
                }
 #endif
@@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
         *   (* Generate a new socket and switch to that socket *)
         *   Set S := new socket for this port pair
         */
-       struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+       struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
        if (newsk != NULL) {
                struct dccp_request_sock *dreq = dccp_rsk(req);
index 4b2ab657ac8e616f415312d96a2b9c0189489a18..68fa6b7a3e016bbd7d9a3bf173040b39bc86eb20 100644 (file)
@@ -544,7 +544,7 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
        }
 
        if (unlikely(val == NULL || len == 0))
-               len = repeat_first = 0;
+               len = repeat_first = false;
        tot_len = 3 + repeat_first + len;
 
        if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) {
index 33d0e6297c213810a71beeb29f17c3c928727ff3..0a8d6ebd9b45063ab32eba86e905b29148031ef5 100644 (file)
@@ -152,6 +152,17 @@ static const struct file_operations dccpprobe_fops = {
        .llseek  = noop_llseek,
 };
 
+static __init int setup_jprobe(void)
+{
+       int ret = register_jprobe(&dccp_send_probe);
+
+       if (ret) {
+               request_module("dccp");
+               ret = register_jprobe(&dccp_send_probe);
+       }
+       return ret;
+}
+
 static __init int dccpprobe_init(void)
 {
        int ret = -ENOMEM;
@@ -163,8 +174,7 @@ static __init int dccpprobe_init(void)
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;
 
-       try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
-                               "dccp");
+       ret = setup_jprobe();
        if (ret)
                goto err1;
 
index e742f90a6858879aa075a0900230abf533eb89d3..7065c0ae1e7b5e7e4092b00fadcf54f8fe90061b 100644 (file)
@@ -1099,7 +1099,7 @@ module_param(thash_entries, int, 0444);
 MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-int dccp_debug;
+bool dccp_debug;
 module_param(dccp_debug, bool, 0644);
 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
 
index 7f0eb087dc116390ebb67aca72295f95d24dcde3..befe426491ba0c793ac176ed9d3994f5d73e9b31 100644 (file)
@@ -88,9 +88,9 @@ static const struct neigh_ops dn_phase3_ops = {
 
 static u32 dn_neigh_hash(const void *pkey,
                         const struct net_device *dev,
-                        __u32 hash_rnd)
+                        __u32 *hash_rnd)
 {
-       return jhash_2words(*(__u16 *)pkey, 0, hash_rnd);
+       return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
 }
 
 struct neigh_table dn_neigh_table = {
@@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = {
                .gc_staletime = 60 * HZ,
                .reachable_time =               30 * HZ,
                .delay_probe_time =     5 * HZ,
-               .queue_len =            3,
+               .queue_len_bytes =      64*1024,
                .ucast_probes = 0,
                .app_probes =           0,
                .mcast_probes = 0,
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = dst_get_neighbour(dst);
+       struct neighbour *neigh = dst_get_neighbour_noref(dst);
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
 
index a77d16158eb6fa2eec5b1890b81ce12b92d88851..f31ce72dca65517a144d61f233e086a2c9ebf481 100644 (file)
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
-       .default_mtu =          dn_dst_default_mtu,
+       .mtu =                  dn_dst_mtu,
        .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              dn_dst_destroy,
        .negative_advice =      dn_dst_negative_advice,
@@ -244,7 +244,7 @@ static int dn_dst_gc(struct dst_ops *ops)
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
        u32 min_mtu = 230;
        struct dn_dev *dn;
 
@@ -713,7 +713,7 @@ out:
 static int dn_to_neigh_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
 
        return n->output(n, skb);
 }
@@ -728,7 +728,7 @@ static int dn_output(struct sk_buff *skb)
 
        int err = -EINVAL;
 
-       if ((neigh = dst_get_neighbour(dst)) == NULL)
+       if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
                goto error;
 
        skb->dev = dev;
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
        return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-       return dst->dev->mtu;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -850,7 +852,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
        }
        rt->rt_type = res->type;
 
-       if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
+       if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
                n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
index 67f691bd4acfee77b2b3402e2e9e4e5092bab8c0..d9c150cc59a952ac86585b6c600acc398b80bc14 100644 (file)
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-       sk->sk_timer.expires    = jiffies + SLOW_INTERVAL;
-       sk->sk_timer.function   = dn_slow_timer;
-       sk->sk_timer.data       = (unsigned long)sk;
-
-       add_timer(&sk->sk_timer);
+       setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
 void dn_stop_slow_timer(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
        struct sock *sk = (struct sock *)arg;
        struct dn_scp *scp = DN_SK(sk);
 
-       sock_hold(sk);
        bh_lock_sock(sk);
 
        if (sock_owned_by_user(sk)) {
-               sk->sk_timer.expires = jiffies + HZ / 10;
-               add_timer(&sk->sk_timer);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                goto out;
        }
 
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
                        scp->keepalive_fxn(sk);
        }
 
-       sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-       add_timer(&sk->sk_timer);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
        bh_unlock_sock(sk);
        sock_put(sk);
index c53ded2a98dfbcd26113a2265fc14f22fa352971..274791cd7a35df1274e9120d199af84ea9f60dd0 100644 (file)
@@ -1,5 +1,5 @@
-menuconfig NET_DSA
-       bool "Distributed Switch Architecture support"
+config NET_DSA
+       tristate "Distributed Switch Architecture support"
        default n
        depends on EXPERIMENTAL && NETDEVICES && !S390
        select PHYLIB
@@ -23,38 +23,4 @@ config NET_DSA_TAG_TRAILER
        bool
        default n
 
-
-# switch drivers
-config NET_DSA_MV88E6XXX
-       bool
-       default n
-
-config NET_DSA_MV88E6060
-       bool "Marvell 88E6060 ethernet switch chip support"
-       select NET_DSA_TAG_TRAILER
-       ---help---
-         This enables support for the Marvell 88E6060 ethernet switch
-         chip.
-
-config NET_DSA_MV88E6XXX_NEED_PPU
-       bool
-       default n
-
-config NET_DSA_MV88E6131
-       bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_MV88E6XXX_NEED_PPU
-       select NET_DSA_TAG_DSA
-       ---help---
-         This enables support for the Marvell 88E6085/6095/6095F/6131
-         ethernet switch chips.
-
-config NET_DSA_MV88E6123_61_65
-       bool "Marvell 88E6123/6161/6165 ethernet switch chip support"
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_TAG_EDSA
-       ---help---
-         This enables support for the Marvell 88E6123/6161/6165
-         ethernet switch chips.
-
 endif
index 2374faff4dea8510cda51aec9d42391e3a124062..7b9fcbbeda5d0d80678ba909f76b0b30d8436a2d 100644 (file)
@@ -1,13 +1,8 @@
-# tagging formats
-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-
-# switch drivers
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o
-obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o
-
 # the core
-obj-$(CONFIG_NET_DSA) += dsa.o slave.o
+obj-$(CONFIG_NET_DSA) += dsa_core.o
+dsa_core-y += dsa.o slave.o
+
+# tagging formats
+dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
index 0dc1589343c3f38335660752433d8b51d2898089..88e7c2f3fa0d470404f6560db952cb30a9639a33 100644 (file)
@@ -29,6 +29,7 @@ void register_switch_driver(struct dsa_switch_driver *drv)
        list_add_tail(&drv->list, &dsa_switch_drivers);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(register_switch_driver);
 
 void unregister_switch_driver(struct dsa_switch_driver *drv)
 {
@@ -36,6 +37,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv)
        list_del_init(&drv->list);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(unregister_switch_driver);
 
 static struct dsa_switch_driver *
 dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
@@ -199,29 +201,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 }
 
 
-/* hooks for ethertype-less tagging formats *********************************/
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-bool dsa_uses_dsa_tags(void *dsa_ptr)
-{
-       struct dsa_switch_tree *dst = dsa_ptr;
-
-       return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-bool dsa_uses_trailer_tags(void *dsa_ptr)
-{
-       struct dsa_switch_tree *dst = dsa_ptr;
-
-       return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
-}
-
-
 /* link polling *************************************************************/
 static void dsa_link_poll_work(struct work_struct *ugly)
 {
@@ -419,12 +398,36 @@ static struct platform_driver dsa_driver = {
 
 static int __init dsa_init_module(void)
 {
-       return platform_driver_register(&dsa_driver);
+       int rc;
+
+       rc = platform_driver_register(&dsa_driver);
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_NET_DSA_TAG_DSA
+       dev_add_pack(&dsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+       dev_add_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+       dev_add_pack(&trailer_packet_type);
+#endif
+       return 0;
 }
 module_init(dsa_init_module);
 
 static void __exit dsa_cleanup_module(void)
 {
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+       dev_remove_pack(&trailer_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+       dev_remove_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+       dev_remove_pack(&dsa_packet_type);
+#endif
        platform_driver_unregister(&dsa_driver);
 }
 module_exit(dsa_cleanup_module);
index 4b0ea0540442e188e4ab4adc938a304d4c080ce4..d4cf5cc747e3569f4d41855d9894b7a4fa98253d 100644 (file)
 #ifndef __DSA_PRIV_H
 #define __DSA_PRIV_H
 
-#include <linux/list.h>
 #include <linux/phy.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
 #include <net/dsa.h>
 
-struct dsa_switch {
-       /*
-        * Parent switch tree, and switch index.
-        */
-       struct dsa_switch_tree  *dst;
-       int                     index;
-
-       /*
-        * Configuration data for this switch.
-        */
-       struct dsa_chip_data    *pd;
-
-       /*
-        * The used switch driver.
-        */
-       struct dsa_switch_driver        *drv;
-
-       /*
-        * Reference to mii bus to use.
-        */
-       struct mii_bus          *master_mii_bus;
-
-       /*
-        * Slave mii_bus and devices for the individual ports.
-        */
-       u32                     dsa_port_mask;
-       u32                     phys_port_mask;
-       struct mii_bus          *slave_mii_bus;
-       struct net_device       *ports[DSA_MAX_PORTS];
-};
-
-struct dsa_switch_tree {
-       /*
-        * Configuration data for the platform device that owns
-        * this dsa switch tree instance.
-        */
-       struct dsa_platform_data        *pd;
-
-       /*
-        * Reference to network device to use, and which tagging
-        * protocol to use.
-        */
-       struct net_device       *master_netdev;
-       __be16                  tag_protocol;
-
-       /*
-        * The switch and port to which the CPU is attached.
-        */
-       s8                      cpu_switch;
-       s8                      cpu_port;
-
-       /*
-        * Link state polling.
-        */
-       int                     link_poll_needed;
-       struct work_struct      link_poll_work;
-       struct timer_list       link_poll_timer;
-
-       /*
-        * Data for the individual switch chips.
-        */
-       struct dsa_switch       *ds[DSA_MAX_SWITCHES];
-};
-
-static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
-{
-       return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
-}
-
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
-{
-       struct dsa_switch_tree *dst = ds->dst;
-
-       /*
-        * If this is the root switch (i.e. the switch that connects
-        * to the CPU), return the cpu port number on this switch.
-        * Else return the (DSA) port number that connects to the
-        * switch that is one hop closer to the cpu.
-        */
-       if (dst->cpu_switch == ds->index)
-               return dst->cpu_port;
-       else
-               return ds->pd->rtable[dst->cpu_switch];
-}
-
 struct dsa_slave_priv {
        /*
         * The linux network interface corresponding to this
@@ -123,44 +35,8 @@ struct dsa_slave_priv {
        struct phy_device       *phy;
 };
 
-struct dsa_switch_driver {
-       struct list_head        list;
-
-       __be16                  tag_protocol;
-       int                     priv_size;
-
-       /*
-        * Probing and setup.
-        */
-       char    *(*probe)(struct mii_bus *bus, int sw_addr);
-       int     (*setup)(struct dsa_switch *ds);
-       int     (*set_addr)(struct dsa_switch *ds, u8 *addr);
-
-       /*
-        * Access to the switch's PHY registers.
-        */
-       int     (*phy_read)(struct dsa_switch *ds, int port, int regnum);
-       int     (*phy_write)(struct dsa_switch *ds, int port,
-                            int regnum, u16 val);
-
-       /*
-        * Link state polling and IRQ handling.
-        */
-       void    (*poll_link)(struct dsa_switch *ds);
-
-       /*
-        * ethtool hardware statistics.
-        */
-       void    (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
-       void    (*get_ethtool_stats)(struct dsa_switch *ds,
-                                    int port, uint64_t *data);
-       int     (*get_sset_count)(struct dsa_switch *ds);
-};
-
 /* dsa.c */
 extern char dsa_driver_version[];
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
 
 /* slave.c */
 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -170,12 +46,15 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds,
 
 /* tag_dsa.c */
 netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type dsa_packet_type;
 
 /* tag_edsa.c */
 netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type edsa_packet_type;
 
 /* tag_trailer.c */
 netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type trailer_packet_type;
 
 
 #endif
index 98dfe80b45381f1c5b4bea6459f8d1b5e932c870..cacce1e22f9caa029c2374cd93c2e071932f365c 100644 (file)
@@ -186,20 +186,7 @@ out:
        return 0;
 }
 
-static struct packet_type dsa_packet_type __read_mostly = {
+struct packet_type dsa_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_DSA),
        .func   = dsa_rcv,
 };
-
-static int __init dsa_init_module(void)
-{
-       dev_add_pack(&dsa_packet_type);
-       return 0;
-}
-module_init(dsa_init_module);
-
-static void __exit dsa_cleanup_module(void)
-{
-       dev_remove_pack(&dsa_packet_type);
-}
-module_exit(dsa_cleanup_module);
index 6f383322ad2508570d793f3c6bd8078c4152c0b4..e70c43c25e64c9310d3d5a61b407d8eeb76402d2 100644 (file)
@@ -205,20 +205,7 @@ out:
        return 0;
 }
 
-static struct packet_type edsa_packet_type __read_mostly = {
+struct packet_type edsa_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_EDSA),
        .func   = edsa_rcv,
 };
-
-static int __init edsa_init_module(void)
-{
-       dev_add_pack(&edsa_packet_type);
-       return 0;
-}
-module_init(edsa_init_module);
-
-static void __exit edsa_cleanup_module(void)
-{
-       dev_remove_pack(&edsa_packet_type);
-}
-module_exit(edsa_cleanup_module);
index d6d7d0add3cb4634fd00d61fb983973a4e5a4551..94bc260d015d11f1a321ca3f3876c7b901716302 100644 (file)
@@ -114,20 +114,7 @@ out:
        return 0;
 }
 
-static struct packet_type trailer_packet_type __read_mostly = {
+struct packet_type trailer_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_TRAILER),
        .func   = trailer_rcv,
 };
-
-static int __init trailer_init_module(void)
-{
-       dev_add_pack(&trailer_packet_type);
-       return 0;
-}
-module_init(trailer_init_module);
-
-static void __exit trailer_cleanup_module(void)
-{
-       dev_remove_pack(&trailer_packet_type);
-}
-module_exit(trailer_cleanup_module);
index 1c1f26c5d672034973187409c94688e59bf03e55..7e717cb35ad15c7437dba106107cf4cebb653a52 100644 (file)
@@ -322,6 +322,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                /* Real hardware Econet.  We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
                unsigned short proto = 0;
+               int hlen, tlen;
                int res;
 
                if (len + 15 > dev->mtu) {
@@ -331,12 +332,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
                dev_hold(dev);
 
-               skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
+               hlen = LL_RESERVED_SPACE(dev);
+               tlen = dev->needed_tailroom;
+               skb = sock_alloc_send_skb(sk, len + hlen + tlen,
                                          msg->msg_flags & MSG_DONTWAIT, &err);
                if (skb == NULL)
                        goto out_unlock;
 
-               skb_reserve(skb, LL_RESERVED_SPACE(dev));
+               skb_reserve(skb, hlen);
                skb_reset_network_header(skb);
 
                eb = (struct ec_cb *)&skb->cb;
index 19d6aefe97d4b9de337d8df3848a4b988bb5a5bd..e4ecc1eef98c3b2aca2b583de283bc7bc7b2f44c 100644 (file)
@@ -50,8 +50,6 @@
  * SUCH DAMAGE.
  */
 
-#define DEBUG
-
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
@@ -113,6 +111,20 @@ struct lowpan_dev_record {
        struct list_head list;
 };
 
+struct lowpan_fragment {
+       struct sk_buff          *skb;           /* skb to be assembled */
+       spinlock_t              lock;           /* concurency lock */
+       u16                     length;         /* length to be assemled */
+       u32                     bytes_rcv;      /* bytes received */
+       u16                     tag;            /* current fragment tag */
+       struct timer_list       timer;          /* assembling timer */
+       struct list_head        list;           /* fragments list */
+};
+
+static unsigned short fragment_tag;
+static LIST_HEAD(lowpan_fragments);
+spinlock_t flist_lock;
+
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
@@ -234,6 +246,50 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
        return 0;
 }
 
+static void
+lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_hdr(skb);
+
+       pr_debug("(%s): UDP header compression\n", __func__);
+
+       if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
+                               LOWPAN_NHC_UDP_4BIT_PORT) &&
+           ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
+                               LOWPAN_NHC_UDP_4BIT_PORT)) {
+               pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
+               **(hc06_ptr + 1) = /* subtraction is faster */
+                  (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
+                      ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
+               *hc06_ptr += 2;
+       } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("(%s): remove 8 bits of dest\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
+               memcpy(*hc06_ptr + 1, &uh->source, 2);
+               **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
+               *hc06_ptr += 4;
+       } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("(%s): remove 8 bits of source\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
+               memcpy(*hc06_ptr + 1, &uh->dest, 2);
+               **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
+               *hc06_ptr += 4;
+       } else {
+               pr_debug("(%s): can't compress header\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
+               memcpy(*hc06_ptr + 1, &uh->source, 2);
+               memcpy(*hc06_ptr + 3, &uh->dest, 2);
+               *hc06_ptr += 5;
+       }
+
+       /* checksum is always inline */
+       memcpy(*hc06_ptr, &uh->check, 2);
+       *hc06_ptr += 2;
+}
+
 static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
 {
        u8 ret;
@@ -244,6 +300,73 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
        return ret;
 }
 
+static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+{
+       u16 ret;
+
+       BUG_ON(!pskb_may_pull(skb, 2));
+
+       ret = skb->data[0] | (skb->data[1] << 8);
+       skb_pull(skb, 2);
+       return ret;
+}
+
+static int
+lowpan_uncompress_udp_header(struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_hdr(skb);
+       u8 tmp;
+
+       tmp = lowpan_fetch_skb_u8(skb);
+
+       if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+               pr_debug("(%s): UDP header uncompression\n", __func__);
+               switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+               case LOWPAN_NHC_UDP_CS_P_00:
+                       memcpy(&uh->source, &skb->data[0], 2);
+                       memcpy(&uh->dest, &skb->data[2], 2);
+                       skb_pull(skb, 4);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_01:
+                       memcpy(&uh->source, &skb->data[0], 2);
+                       uh->dest =
+                          skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
+                       skb_pull(skb, 3);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_10:
+                       uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
+                       memcpy(&uh->dest, &skb->data[1], 2);
+                       skb_pull(skb, 3);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_11:
+                       uh->source =
+                          LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
+                       uh->dest =
+                          LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
+                       skb_pull(skb, 1);
+                       break;
+               default:
+                       pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+                       goto err;
+                       break;
+               }
+
+               pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
+                                       __func__, uh->source, uh->dest);
+
+               /* copy checksum */
+               memcpy(&uh->check, &skb->data[0], 2);
+               skb_pull(skb, 2);
+       } else {
+               pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+               goto err;
+       }
+
+       return 0;
+err:
+       return -EINVAL;
+}
+
 static int lowpan_header_create(struct sk_buff *skb,
                           struct net_device *dev,
                           unsigned short type, const void *_daddr,
@@ -342,8 +465,6 @@ static int lowpan_header_create(struct sk_buff *skb,
        if (hdr->nexthdr == UIP_PROTO_UDP)
                iphc0 |= LOWPAN_IPHC_NH_C;
 
-/* TODO: next header compression */
-
        if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
                *hc06_ptr = hdr->nexthdr;
                hc06_ptr += 1;
@@ -431,8 +552,9 @@ static int lowpan_header_create(struct sk_buff *skb,
                }
        }
 
-       /* TODO: UDP header compression */
-       /* TODO: Next Header compression */
+       /* UDP header compression */
+       if (hdr->nexthdr == UIP_PROTO_UDP)
+               lowpan_compress_udp_header(&hc06_ptr, skb);
 
        head[0] = iphc0;
        head[1] = iphc1;
@@ -467,6 +589,7 @@ static int lowpan_header_create(struct sk_buff *skb,
                memcpy(&(sa.hwaddr), saddr, 8);
 
                mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+
                return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                                type, (void *)&da, (void *)&sa, skb->len);
        }
@@ -511,6 +634,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
        return stat;
 }
 
+static void lowpan_fragment_timer_expired(unsigned long entry_addr)
+{
+       struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
+
+       pr_debug("%s: timer expired for frame with tag %d\n", __func__,
+                                                               entry->tag);
+
+       spin_lock(&flist_lock);
+       list_del(&entry->list);
+       spin_unlock(&flist_lock);
+
+       dev_kfree_skb(entry->skb);
+       kfree(entry);
+}
+
 static int
 lowpan_process_data(struct sk_buff *skb)
 {
@@ -525,6 +663,107 @@ lowpan_process_data(struct sk_buff *skb)
        if (skb->len < 2)
                goto drop;
        iphc0 = lowpan_fetch_skb_u8(skb);
+
+       /* fragments assembling */
+       switch (iphc0 & LOWPAN_DISPATCH_MASK) {
+       case LOWPAN_DISPATCH_FRAG1:
+       case LOWPAN_DISPATCH_FRAGN:
+       {
+               struct lowpan_fragment *frame;
+               u8 len, offset;
+               u16 tag;
+               bool found = false;
+
+               len = lowpan_fetch_skb_u8(skb); /* frame length */
+               tag = lowpan_fetch_skb_u16(skb);
+
+               /*
+                * check if frame assembling with the same tag is
+                * already in progress
+                */
+               spin_lock(&flist_lock);
+
+               list_for_each_entry(frame, &lowpan_fragments, list)
+                       if (frame->tag == tag) {
+                               found = true;
+                               break;
+                       }
+
+               /* alloc new frame structure */
+               if (!found) {
+                       frame = kzalloc(sizeof(struct lowpan_fragment),
+                                                               GFP_ATOMIC);
+                       if (!frame)
+                               goto unlock_and_drop;
+
+                       INIT_LIST_HEAD(&frame->list);
+
+                       frame->length = (iphc0 & 7) | (len << 3);
+                       frame->tag = tag;
+
+                       /* allocate buffer for frame assembling */
+                       frame->skb = alloc_skb(frame->length +
+                                       sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+                       if (!frame->skb) {
+                               kfree(frame);
+                               goto unlock_and_drop;
+                       }
+
+                       frame->skb->priority = skb->priority;
+                       frame->skb->dev = skb->dev;
+
+                       /* reserve headroom for uncompressed ipv6 header */
+                       skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+                       skb_put(frame->skb, frame->length);
+
+                       init_timer(&frame->timer);
+                       /* time out is the same as for ipv6 - 60 sec */
+                       frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+                       frame->timer.data = (unsigned long)frame;
+                       frame->timer.function = lowpan_fragment_timer_expired;
+
+                       add_timer(&frame->timer);
+
+                       list_add_tail(&frame->list, &lowpan_fragments);
+               }
+
+               if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
+                       goto unlock_and_drop;
+
+               offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+
+               /* if payload fits buffer, copy it */
+               if (likely((offset * 8 + skb->len) <= frame->length))
+                       skb_copy_to_linear_data_offset(frame->skb, offset * 8,
+                                                       skb->data, skb->len);
+               else
+                       goto unlock_and_drop;
+
+               frame->bytes_rcv += skb->len;
+
+               /* frame assembling complete */
+               if ((frame->bytes_rcv == frame->length) &&
+                    frame->timer.expires > jiffies) {
+                       /* if timer haven't expired - first of all delete it */
+                       del_timer(&frame->timer);
+                       list_del(&frame->list);
+                       spin_unlock(&flist_lock);
+
+                       dev_kfree_skb(skb);
+                       skb = frame->skb;
+                       kfree(frame);
+                       iphc0 = lowpan_fetch_skb_u8(skb);
+                       break;
+               }
+               spin_unlock(&flist_lock);
+
+               return kfree_skb(skb), 0;
+       }
+       default:
+               break;
+       }
+
        iphc1 = lowpan_fetch_skb_u8(skb);
 
        _saddr = mac_cb(skb)->sa.hwaddr;
@@ -659,7 +898,10 @@ lowpan_process_data(struct sk_buff *skb)
                        goto drop;
        }
 
-       /* TODO: UDP header parse */
+       /* UDP data uncompression */
+       if (iphc0 & LOWPAN_IPHC_NH_C)
+               if (lowpan_uncompress_udp_header(skb))
+                       goto drop;
 
        /* Not fragmented package */
        hdr.payload_len = htons(skb->len);
@@ -674,6 +916,9 @@ lowpan_process_data(struct sk_buff *skb)
        lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
                                                        sizeof(hdr));
        return lowpan_skb_deliver(skb, &hdr);
+
+unlock_and_drop:
+       spin_unlock(&flist_lock);
 drop:
        kfree_skb(skb);
        return -EINVAL;
@@ -692,18 +937,115 @@ static int lowpan_set_address(struct net_device *dev, void *p)
        return 0;
 }
 
+static int lowpan_get_mac_header_length(struct sk_buff *skb)
+{
+       /*
+        * Currently long addressing mode is supported only, so the overall
+        * header size is 21:
+        * FC SeqNum DPAN DA  SA  Sec
+        * 2  +  1  +  2 + 8 + 8 + 0  = 21
+        */
+       return 21;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+                       int mlen, int plen, int offset)
+{
+       struct sk_buff *frag;
+       int hlen, ret;
+
+       /* if payload length is zero, therefore it's a first fragment */
+       hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE :  LOWPAN_FRAGN_HEAD_SIZE);
+
+       lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+       frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+       if (!frag)
+               return -ENOMEM;
+
+       frag->priority = skb->priority;
+       frag->dev = skb->dev;
+
+       /* copy header, MFR and payload */
+       memcpy(skb_put(frag, mlen), skb->data, mlen);
+       memcpy(skb_put(frag, hlen), head, hlen);
+
+       if (plen)
+               skb_copy_from_linear_data_offset(skb, offset + mlen,
+                                       skb_put(frag, plen), plen);
+
+       lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
+                                                               frag->len);
+
+       ret = dev_queue_xmit(frag);
+
+       return ret;
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb)
+{
+       int  err, header_length, payload_length, tag, offset = 0;
+       u8 head[5];
+
+       header_length = lowpan_get_mac_header_length(skb);
+       payload_length = skb->len - header_length;
+       tag = fragment_tag++;
+
+       /* first fragment header */
+       head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
+       head[1] = (payload_length >> 3) & 0xff;
+       head[2] = tag & 0xff;
+       head[3] = tag >> 8;
+
+       err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
+
+       /* next fragment header */
+       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+       while ((payload_length - offset > 0) && (err >= 0)) {
+               int len = LOWPAN_FRAG_SIZE;
+
+               head[4] = offset / 8;
+
+               if (payload_length - offset < len)
+                       len = payload_length - offset;
+
+               err = lowpan_fragment_xmit(skb, head, header_length,
+                                                       len, offset);
+               offset += len;
+       }
+
+       return err;
+}
+
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       int err = 0;
+       int err = -1;
 
        pr_debug("(%s): package xmit\n", __func__);
 
        skb->dev = lowpan_dev_info(dev)->real_dev;
        if (skb->dev == NULL) {
                pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
-               dev_kfree_skb(skb);
-       } else
+               goto error;
+       }
+
+       if (skb->len <= IEEE802154_MTU) {
                err = dev_queue_xmit(skb);
+               goto out;
+       }
+
+       pr_debug("(%s): frame is too big, fragmentation is needed\n",
+                                                               __func__);
+       err = lowpan_skb_fragmentation(skb);
+error:
+       dev_kfree_skb(skb);
+out:
+       if (err < 0)
+               pr_debug("(%s): ERROR: xmit failed\n", __func__);
 
        return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
 }
@@ -730,13 +1072,12 @@ static void lowpan_setup(struct net_device *dev)
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
        dev->type               = ARPHRD_IEEE802154;
-       dev->features           = NETIF_F_NO_CSUM;
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = 1281;
        dev->tx_queue_len       = 0;
-       dev->flags              = IFF_NOARP | IFF_BROADCAST;
+       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
 
        dev->netdev_ops         = &lowpan_netdev_ops;
@@ -765,8 +1106,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* check that it's our buffer */
-       if ((skb->data[0] & 0xe0) == 0x60)
+       switch (skb->data[0] & 0xe0) {
+       case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
+       case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
+       case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
                lowpan_process_data(skb);
+               break;
+       default:
+               break;
+       }
 
        return NET_RX_SUCCESS;
 
index 5d8cf80b930dc316e874f67b1b78d690fb89bb03..aeff3f3104821ab3bdead00ed070d5d3fbb5164d 100644 (file)
 #define LOWPAN_DISPATCH_FRAG1  0xc0 /* 11000xxx */
 #define LOWPAN_DISPATCH_FRAGN  0xe0 /* 11100xxx */
 
+#define LOWPAN_DISPATCH_MASK   0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT    (HZ * 60)       /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According IEEE802.15.4 standard:
+ *   - MTU is 127 octets
+ *   - maximum MHR size is 37 octets
+ *   - MFR size is 2 octets
+ *
+ * so minimal payload size that we may guarantee is:
+ *   MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE       88
+
 /*
  * Values of fields within the IPHC encoding first byte
  * (C stands for compressed and I for inline)
 #define LOWPAN_NHC_UDP_CHECKSUMC       0x04
 #define LOWPAN_NHC_UDP_CHECKSUMI       0x00
 
+#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
+
 /* values for port compression, _with checksum_ ie bit 5 set to 0 */
 #define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
 #define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
index faecf648123f7bc9bf5ae7a3913c737a89ea4841..1b09eaabaac1239475e1abab843af353bcccdd4f 100644 (file)
@@ -209,6 +209,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        unsigned mtu;
        struct sk_buff *skb;
        struct dgram_sock *ro = dgram_sk(sk);
+       int hlen, tlen;
        int err;
 
        if (msg->msg_flags & MSG_OOB) {
@@ -229,13 +230,15 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        mtu = dev->mtu;
        pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
                        msg->msg_flags & MSG_DONTWAIT,
                        &err);
        if (!skb)
                goto out_dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
 
index 10970ca85748a2a0835b2a9ac17bf26c66c1e9e2..f96bae8fd330213aec8d58b35531e72486e77a54 100644 (file)
@@ -108,6 +108,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct net_device *dev;
        unsigned mtu;
        struct sk_buff *skb;
+       int hlen, tlen;
        int err;
 
        if (msg->msg_flags & MSG_OOB) {
@@ -137,12 +138,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                goto out_dev;
        }
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
                        msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto out_dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
index cbb505ba9324fa05caeb9f93e545d9cfaf42e7af..1a8f93bd2d4f9c4bb0eeb44c640d88317d333ecd 100644 (file)
@@ -409,6 +409,10 @@ config INET_TCP_DIAG
        depends on INET_DIAG
        def_tristate INET_DIAG
 
+config INET_UDP_DIAG
+       depends on INET_DIAG
+       def_tristate INET_DIAG && IPV6
+
 menuconfig TCP_CONG_ADVANCED
        bool "TCP: advanced congestion control"
        ---help---
index f2dc69cffb578b31c1796ac544fe50a842a6fbfa..ff75d3bbcd6a4cee0bfbda1747f592b0b6e91b48 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
 obj-$(CONFIG_NETFILTER)        += netfilter.o netfilter/
 obj-$(CONFIG_INET_DIAG) += inet_diag.o 
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
+obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
index 1b5096a9875aae06db3fa9857800881dcc305d31..f7b5670744f0f92991ddf10854309cb3592bed8d 100644 (file)
@@ -1250,7 +1250,8 @@ out:
        return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct iphdr *iph;
@@ -1572,9 +1573,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
                          sizeof(struct icmp_mib),
                          __alignof__(struct icmp_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-                         sizeof(struct icmpmsg_mib),
-                         __alignof__(struct icmpmsg_mib)) < 0)
+       net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+                                             GFP_KERNEL);
+       if (!net->mib.icmpmsg_statistics)
                goto err_icmpmsg_mib;
 
        tcp_mib_init(net);
@@ -1598,7 +1599,7 @@ err_tcp_mib:
 
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+       kfree(net->mib.icmpmsg_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
        snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
        snmp_mib_free((void __percpu **)net->mib.udp_statistics);
@@ -1671,6 +1672,8 @@ static int __init inet_init(void)
        ip_static_sysctl_init();
 #endif
 
+       tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      Add all the base protocols.
         */
index c1f4154552fc582320e276e6768831ed9ec32548..36d14406261e8c9ad486103127ae1106baca4678 100644 (file)
@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }
 
-       err = ah->nexthdr;
-
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
 }
@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
        if (err)
                goto out;
 
+       err = ah->nexthdr;
+
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
        skb_set_transport_header(skb, -ihl);
-
-       err = ah->nexthdr;
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -371,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
                if (err == -EINPROGRESS)
                        goto out;
 
-               if (err == -EBUSY)
-                       err = NET_XMIT_DROP;
                goto out_free;
        }
 
index 96a164aa1367b9c4f48f58c860b4e785261dbd9c..59402be133f0b4cced6dac7d22fea61fcf89dda6 100644 (file)
 #include <net/arp.h>
 #include <net/ax25.h>
 #include <net/netrom.h>
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-#include <net/atmclip.h>
-struct neigh_table *clip_tbl_hook;
-EXPORT_SYMBOL(clip_tbl_hook);
-#endif
 
 #include <asm/system.h>
 #include <linux/uaccess.h>
@@ -126,7 +121,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
 /*
  *     Interface to generic neighbour cache.
  */
-static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd);
+static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
 static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -164,7 +159,6 @@ static const struct neigh_ops arp_broken_ops = {
 
 struct neigh_table arp_tbl = {
        .family         = AF_INET,
-       .entry_size     = sizeof(struct neighbour) + 4,
        .key_len        = 4,
        .hash           = arp_hash,
        .constructor    = arp_constructor,
@@ -177,7 +171,7 @@ struct neigh_table arp_tbl = {
                .gc_staletime           = 60 * HZ,
                .reachable_time         = 30 * HZ,
                .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
+               .queue_len_bytes        = 64*1024,
                .ucast_probes           = 3,
                .mcast_probes           = 3,
                .anycast_delay          = 1 * HZ,
@@ -221,9 +215,9 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
 
 static u32 arp_hash(const void *pkey,
                    const struct net_device *dev,
-                   __u32 hash_rnd)
+                   __u32 *hash_rnd)
 {
-       return arp_hashfn(*(u32 *)pkey, dev, hash_rnd);
+       return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
 }
 
 static int arp_constructor(struct neighbour *neigh)
@@ -283,9 +277,9 @@ static int arp_constructor(struct neighbour *neigh)
                default:
                        break;
                case ARPHRD_ROSE:
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
                case ARPHRD_AX25:
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
                case ARPHRD_NETROM:
 #endif
                        neigh->ops = &arp_broken_ops;
@@ -592,16 +586,18 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
        struct sk_buff *skb;
        struct arphdr *arp;
        unsigned char *arp_ptr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        /*
         *      Allocate a buffer
         */
 
-       skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+       skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
        arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
        skb->dev = dev;
@@ -633,13 +629,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                arp->ar_pro = htons(ETH_P_IP);
                break;
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        case ARPHRD_AX25:
                arp->ar_hrd = htons(ARPHRD_AX25);
                arp->ar_pro = htons(AX25_P_IP);
                break;
 
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
        case ARPHRD_NETROM:
                arp->ar_hrd = htons(ARPHRD_NETROM);
                arp->ar_pro = htons(AX25_P_IP);
@@ -647,13 +643,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 #endif
 #endif
 
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                arp->ar_hrd = htons(ARPHRD_ETHER);
                arp->ar_pro = htons(ETH_P_IP);
                break;
 #endif
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#if IS_ENABLED(CONFIG_TR)
        case ARPHRD_IEEE802_TR:
                arp->ar_hrd = htons(ARPHRD_IEEE802);
                arp->ar_pro = htons(ETH_P_IP);
@@ -1040,7 +1036,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
                        return -EINVAL;
        }
        switch (dev->type) {
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                /*
                 * According to RFC 1390, FDDI devices should accept ARP
@@ -1286,7 +1282,7 @@ void __init arp_init(void)
 }
 
 #ifdef CONFIG_PROC_FS
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 
 /* ------------------------------------------------------------------------ */
 /*
@@ -1334,7 +1330,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
 
        read_lock(&n->lock);
        /* Convert hardware address to XX:XX:XX:XX ... form. */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
                ax2asc2((ax25_address *)n->ha, hbuffer);
        else {
@@ -1347,7 +1343,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
        if (k != 0)
                --k;
        hbuffer[k] = 0;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        }
 #endif
        sprintf(tbuf, "%pI4", n->primary_key);
index c6b5092f29a15511bb7c69f37a86dca2c538a164..65f01dc47565bcc26282d4472bbd9c3fb2d3712c 100644 (file)
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
+       int old_value = *(int *)ctl->data;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
 
        if (write) {
                struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
                if (cnf == net->ipv4.devconf_dflt)
                        devinet_copy_dflt_conf(net, i);
+               if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+                       if ((new_value == 0) && (old_value != 0))
+                               rt_cache_flush(net, 0);
        }
 
        return ret;
index 46339ba7a2d3372b47a5eaeaffa2a71774008d0e..799fc790b3cfa67cc170799c89416d5b9da5671c 100644 (file)
@@ -67,6 +67,7 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 
        return err;
 }
+EXPORT_SYMBOL_GPL(fib_lookup);
 
 static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
index 37b671185c8192c7de2e7166e9089b9276d98121..d04b13ae18fecc3b641ecf5a45279a620307f15f 100644 (file)
@@ -1607,6 +1607,7 @@ found:
        rcu_read_unlock();
        return ret;
 }
+EXPORT_SYMBOL_GPL(fib_table_lookup);
 
 /*
  * Remove the leaf and return parent.
index c7472eff2d514b475579d1a3e5a89269d04fce0e..fa057d105befea6e775c251f900b763929d4f572 100644 (file)
@@ -304,9 +304,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        struct igmpv3_report *pig;
        struct net *net = dev_net(dev);
        struct flowi4 fl4;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        while (1) {
-               skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+               skb = alloc_skb(size + hlen + tlen,
                                GFP_ATOMIC | __GFP_NOWARN);
                if (skb)
                        break;
@@ -327,7 +329,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        skb_dst_set(skb, &rt->dst);
        skb->dev = dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
        pip = ip_hdr(skb);
@@ -647,6 +649,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        __be32  group = pmc ? pmc->multiaddr : 0;
        struct flowi4 fl4;
        __be32  dst;
+       int hlen, tlen;
 
        if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
                return igmpv3_send_report(in_dev, pmc);
@@ -661,7 +664,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        if (IS_ERR(rt))
                return -1;
 
-       skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
        if (skb == NULL) {
                ip_rt_put(rt);
                return -1;
@@ -669,7 +674,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 
        skb_dst_set(skb, &rt->dst);
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
@@ -1574,7 +1579,7 @@ out_unlock:
  * Add multicast single-source filter to the interface list
  */
 static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
-       __be32 *psfsrc, int delta)
+       __be32 *psfsrc)
 {
        struct ip_sf_list *psf, *psf_prev;
 
@@ -1709,14 +1714,15 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                pmc->sfcount[sfmode]++;
        err = 0;
        for (i=0; i<sfcount; i++) {
-               err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+               err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
                if (err)
                        break;
        }
        if (err) {
                int j;
 
-               pmc->sfcount[sfmode]--;
+               if (!delta)
+                       pmc->sfcount[sfmode]--;
                for (j=0; j<i; j++)
                        (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
index c14d88ad348d3365a11685a59c453d54c4db5332..2e4e24476c4c3ba1b1abb1463ff2f4123180ccad 100644 (file)
@@ -418,7 +418,7 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) 1
@@ -588,10 +588,19 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 }
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
 
-struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
-                           const gfp_t priority)
+/**
+ *     inet_csk_clone_lock - clone an inet socket, and lock its clone
+ *     @sk: the socket to clone
+ *     @req: request_sock
+ *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *     Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                const struct request_sock *req,
+                                const gfp_t priority)
 {
-       struct sock *newsk = sk_clone(sk, priority);
+       struct sock *newsk = sk_clone_lock(sk, priority);
 
        if (newsk != NULL) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -615,7 +624,7 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
        }
        return newsk;
 }
-EXPORT_SYMBOL_GPL(inet_csk_clone);
+EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
 
 /*
  * At this point, there should be no process reference to this
index f5e2bdaef9495eb52524089f6d9b341cd5503137..2240a8e8c44dbefaed2a83aa44b6e058970fc65b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/stddef.h>
 
 #include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
 
 static const struct inet_diag_handler **inet_diag_table;
 
@@ -45,24 +46,22 @@ struct inet_diag_entry {
        u16 userlocks;
 };
 
-static struct sock *idiagnl;
-
 #define INET_DIAG_PUT(skb, attrtype, attrlen) \
        RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
 
-static const struct inet_diag_handler *inet_diag_lock_handler(int type)
+static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
 {
-       if (!inet_diag_table[type])
-               request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-                              NETLINK_INET_DIAG, type);
+       if (!inet_diag_table[proto])
+               request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+                              NETLINK_SOCK_DIAG, AF_INET, proto);
 
        mutex_lock(&inet_diag_table_mutex);
-       if (!inet_diag_table[type])
+       if (!inet_diag_table[proto])
                return ERR_PTR(-ENOENT);
 
-       return inet_diag_table[type];
+       return inet_diag_table[proto];
 }
 
 static inline void inet_diag_unlock_handler(
@@ -71,21 +70,21 @@ static inline void inet_diag_unlock_handler(
        mutex_unlock(&inet_diag_table_mutex);
 }
 
-static int inet_csk_diag_fill(struct sock *sk,
-                             struct sk_buff *skb,
-                             int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr  *nlh;
        void *info = NULL;
        struct inet_diag_meminfo  *minfo = NULL;
        unsigned char    *b = skb_tail_pointer(skb);
        const struct inet_diag_handler *handler;
+       int ext = req->idiag_ext;
 
-       handler = inet_diag_table[unlh->nlmsg_type];
+       handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(handler == NULL);
 
        nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
@@ -97,47 +96,55 @@ static int inet_csk_diag_fill(struct sock *sk,
        if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
                minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
 
-       if (ext & (1 << (INET_DIAG_INFO - 1)))
-               info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
-                                    handler->idiag_info_size);
-
-       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
-               const size_t len = strlen(icsk->icsk_ca_ops->name);
-
-               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
-                      icsk->icsk_ca_ops->name);
-       }
-
-       if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
        r->idiag_family = sk->sk_family;
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
 
        r->id.idiag_if = sk->sk_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+       sock_diag_save_cookie(sk, r->id.idiag_cookie);
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = inet->inet_dport;
        r->id.idiag_src[0] = inet->inet_rcv_saddr;
        r->id.idiag_dst[0] = inet->inet_daddr;
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+       /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+        * hence this needs to be included regardless of socket family.
+        */
+       if (ext & (1 << (INET_DIAG_TOS - 1)))
+               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
                const struct ipv6_pinfo *np = inet6_sk(sk);
 
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &np->rcv_saddr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &np->daddr);
-               if (ext & (1 << (INET_DIAG_TOS - 1)))
-                       RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
+               *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = np->daddr;
+               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+                       RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
        }
 #endif
 
+       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_inode = sock_i_ino(sk);
+
+       if (minfo) {
+               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
+               minfo->idiag_wmem = sk->sk_wmem_queued;
+               minfo->idiag_fmem = sk->sk_forward_alloc;
+               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       }
+
+       if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
+               if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
+                       goto rtattr_failure;
+
+       if (icsk == NULL) {
+               r->idiag_rqueue = r->idiag_wqueue = 0;
+               goto out;
+       }
+
 #define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
@@ -158,14 +165,14 @@ static int inet_csk_diag_fill(struct sock *sk,
        }
 #undef EXPIRES_IN_MS
 
-       r->idiag_uid = sock_i_uid(sk);
-       r->idiag_inode = sock_i_ino(sk);
+       if (ext & (1 << (INET_DIAG_INFO - 1)))
+               info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
 
-       if (minfo) {
-               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
-               minfo->idiag_wmem = sk->sk_wmem_queued;
-               minfo->idiag_fmem = sk->sk_forward_alloc;
-               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+               const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+                      icsk->icsk_ca_ops->name);
        }
 
        handler->idiag_get_info(sk, r, info);
@@ -174,6 +181,7 @@ static int inet_csk_diag_fill(struct sock *sk,
            icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
                icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
+out:
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
@@ -182,10 +190,20 @@ nlmsg_failure:
        nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
+EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
+
+static int inet_csk_diag_fill(struct sock *sk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh)
+{
+       return inet_sk_diag_fill(sk, inet_csk(sk),
+                       skb, req, pid, seq, nlmsg_flags, unlh);
+}
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-                              struct sk_buff *skb, int ext, u32 pid,
-                              u32 seq, u16 nlmsg_flags,
+                              struct sk_buff *skb, struct inet_diag_req *req,
+                              u32 pid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
        long tmo;
@@ -206,8 +224,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_family       = tw->tw_family;
        r->idiag_retrans      = 0;
        r->id.idiag_if        = tw->tw_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+       sock_diag_save_cookie(tw, r->id.idiag_cookie);
        r->id.idiag_sport     = tw->tw_sport;
        r->id.idiag_dport     = tw->tw_dport;
        r->id.idiag_src[0]    = tw->tw_rcv_saddr;
@@ -219,15 +236,13 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
                const struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
 
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &tw6->tw_v6_rcv_saddr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &tw6->tw_v6_daddr);
+               *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
        }
 #endif
        nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
@@ -238,42 +253,31 @@ nlmsg_failure:
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+                       struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, ext, pid, seq, nlmsg_flags,
+                                          skb, r, pid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_diag_get_exact(struct sk_buff *in_skb,
-                              const struct nlmsghdr *nlh)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
 {
        int err;
        struct sock *sk;
-       struct inet_diag_req *req = NLMSG_DATA(nlh);
        struct sk_buff *rep;
-       struct inet_hashinfo *hashinfo;
-       const struct inet_diag_handler *handler;
-
-       handler = inet_diag_lock_handler(nlh->nlmsg_type);
-       if (IS_ERR(handler)) {
-               err = PTR_ERR(handler);
-               goto unlock;
-       }
 
-       hashinfo = handler->idiag_hashinfo;
        err = -EINVAL;
-
-       if (req->idiag_family == AF_INET) {
+       if (req->sdiag_family == AF_INET) {
                sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
        }
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-       else if (req->idiag_family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6) {
                sk = inet6_lookup(&init_net, hashinfo,
                                  (struct in6_addr *)req->id.idiag_dst,
                                  req->id.idiag_dport,
@@ -283,29 +287,26 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
        }
 #endif
        else {
-               goto unlock;
+               goto out_nosk;
        }
 
        err = -ENOENT;
        if (sk == NULL)
-               goto unlock;
+               goto out_nosk;
 
-       err = -ESTALE;
-       if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
-            req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
-           ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
-            (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+       if (err)
                goto out;
 
        err = -ENOMEM;
        rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
                                     sizeof(struct inet_diag_meminfo) +
-                                    handler->idiag_info_size + 64)),
+                                    sizeof(struct tcp_info) + 64)),
                        GFP_KERNEL);
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req->idiag_ext,
+       err = sk_diag_fill(sk, rep, req,
                           NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
@@ -313,7 +314,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
@@ -325,8 +326,25 @@ out:
                else
                        sock_put(sk);
        }
-unlock:
+out_nosk:
+       return err;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh,
+                              struct inet_diag_req *req)
+{
+       const struct inet_diag_handler *handler;
+       int err;
+
+       handler = inet_diag_lock_handler(req->sdiag_protocol);
+       if (IS_ERR(handler))
+               err = PTR_ERR(handler);
+       else
+               err = handler->dump_one(in_skb, nlh, req);
        inet_diag_unlock_handler(handler);
+
        return err;
 }
 
@@ -357,9 +375,12 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
 }
 
 
-static int inet_diag_bc_run(const void *bc, int len,
-                           const struct inet_diag_entry *entry)
+static int inet_diag_bc_run(const struct nlattr *_bc,
+               const struct inet_diag_entry *entry)
 {
+       const void *bc = nla_data(_bc);
+       int len = nla_len(_bc);
+
        while (len > 0) {
                int yes = 1;
                const struct inet_diag_bc_op *op = bc;
@@ -433,6 +454,35 @@ static int inet_diag_bc_run(const void *bc, int len,
        return len == 0;
 }
 
+int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+{
+       struct inet_diag_entry entry;
+       struct inet_sock *inet = inet_sk(sk);
+
+       if (bc == NULL)
+               return 1;
+
+       entry.family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (entry.family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               entry.saddr = np->rcv_saddr.s6_addr32;
+               entry.daddr = np->daddr.s6_addr32;
+       } else
+#endif
+       {
+               entry.saddr = &inet->inet_rcv_saddr;
+               entry.daddr = &inet->inet_daddr;
+       }
+       entry.sport = inet->inet_num;
+       entry.dport = ntohs(inet->inet_dport);
+       entry.userlocks = sk->sk_userlocks;
+
+       return inet_diag_bc_run(bc, &entry);
+}
+EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
+
 static int valid_cc(const void *bc, int len, int cc)
 {
        while (len >= 0) {
@@ -489,57 +539,29 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 
 static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
-                             struct netlink_callback *cb)
+                             struct netlink_callback *cb,
+                             struct inet_diag_req *r,
+                             const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
-               struct inet_sock *inet = inet_sk(sk);
-
-               entry.family = sk->sk_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-               if (entry.family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
-
-                       entry.saddr = np->rcv_saddr.s6_addr32;
-                       entry.daddr = np->daddr.s6_addr32;
-               } else
-#endif
-               {
-                       entry.saddr = &inet->inet_rcv_saddr;
-                       entry.daddr = &inet->inet_daddr;
-               }
-               entry.sport = inet->inet_num;
-               entry.dport = ntohs(inet->inet_dport);
-               entry.userlocks = sk->sk_userlocks;
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
 
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
-                       return 0;
-       }
-
-       return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+       return inet_csk_diag_fill(sk, skb, r,
                                  NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                               struct sk_buff *skb,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+       if (bc != NULL) {
                struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
 
                entry.family = tw->tw_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
                        struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
@@ -555,11 +577,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                entry.dport = ntohs(tw->tw_dport);
                entry.userlocks = 0;
 
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+               if (!inet_diag_bc_run(bc, &entry))
                        return 0;
        }
 
-       return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+       return inet_twsk_diag_fill(tw, skb, r,
                                   NETLINK_CB(cb->skb).pid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -585,8 +607,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_retrans = req->retrans;
 
        r->id.idiag_if = sk->sk_bound_dev_if;
-       r->id.idiag_cookie[0] = (u32)(unsigned long)req;
-       r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
+       sock_diag_save_cookie(req, r->id.idiag_cookie);
 
        tmo = req->expires - jiffies;
        if (tmo < 0)
@@ -601,12 +622,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_wqueue = 0;
        r->idiag_uid = sock_i_uid(sk);
        r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &inet6_rsk(req)->loc_addr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &inet6_rsk(req)->rmt_addr);
+               *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
+               *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
        }
 #endif
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -619,13 +638,13 @@ nlmsg_failure:
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
        struct inet_diag_entry entry;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt;
-       const struct nlattr *bc = NULL;
        struct inet_sock *inet = inet_sk(sk);
        int j, s_j;
        int reqnum, s_reqnum;
@@ -645,9 +664,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        if (!lopt || !lopt->qlen)
                goto out;
 
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
-                                    INET_DIAG_REQ_BYTECODE);
+       if (bc != NULL) {
                entry.sport = inet->inet_num;
                entry.userlocks = sk->sk_userlocks;
        }
@@ -667,21 +684,20 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        if (bc) {
                                entry.saddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->loc_addr.s6_addr32 :
 #endif
                                        &ireq->loc_addr;
                                entry.daddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->rmt_addr.s6_addr32 :
 #endif
                                        &ireq->rmt_addr;
                                entry.dport = ntohs(ireq->rmt_port);
 
-                               if (!inet_diag_bc_run(nla_data(bc),
-                                                     nla_len(bc), &entry))
+                               if (!inet_diag_bc_run(bc, &entry))
                                        continue;
                        }
 
@@ -704,19 +720,11 @@ out:
        return err;
 }
 
-static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
 {
        int i, num;
        int s_i, s_num;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-       const struct inet_diag_handler *handler;
-       struct inet_hashinfo *hashinfo;
-
-       handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
-       if (IS_ERR(handler))
-               goto unlock;
-
-       hashinfo = handler->idiag_hashinfo;
 
        s_i = cb->args[1];
        s_num = num = cb->args[2];
@@ -741,6 +749,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                        continue;
                                }
 
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               sk->sk_family != r->sdiag_family)
+                                       goto next_listen;
+
                                if (r->id.idiag_sport != inet->inet_sport &&
                                    r->id.idiag_sport)
                                        goto next_listen;
@@ -750,7 +762,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                    cb->args[3] > 0)
                                        goto syn_recv;
 
-                               if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                               if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -759,7 +771,7 @@ syn_recv:
                                if (!(r->idiag_states & TCPF_SYN_RECV))
                                        goto next_listen;
 
-                               if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
+                               if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -781,7 +793,7 @@ skip_listen_ht:
        }
 
        if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
-               goto unlock;
+               goto out;
 
        for (i = s_i; i <= hashinfo->ehash_mask; i++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[i];
@@ -806,13 +818,16 @@ skip_listen_ht:
                                goto next_normal;
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next_normal;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next_normal;
                        if (r->id.idiag_sport != inet->inet_sport &&
                            r->id.idiag_sport)
                                goto next_normal;
                        if (r->id.idiag_dport != inet->inet_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                       if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
@@ -828,13 +843,16 @@ next_normal:
 
                                if (num < s_num)
                                        goto next_dying;
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               tw->tw_family != r->sdiag_family)
+                                       goto next_dying;
                                if (r->id.idiag_sport != tw->tw_sport &&
                                    r->id.idiag_sport)
                                        goto next_dying;
                                if (r->id.idiag_dport != tw->tw_dport &&
                                    r->id.idiag_dport)
                                        goto next_dying;
-                               if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
+                               if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(lock);
                                        goto done;
                                }
@@ -848,15 +866,85 @@ next_dying:
 done:
        cb->args[1] = i;
        cb->args[2] = num;
-unlock:
+out:
+       ;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
+
+static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       const struct inet_diag_handler *handler;
+
+       handler = inet_diag_lock_handler(r->sdiag_protocol);
+       if (!IS_ERR(handler))
+               handler->dump(skb, cb, r, bc);
        inet_diag_unlock_handler(handler);
+
        return skb->len;
 }
 
-static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct nlattr *bc = NULL;
        int hdrlen = sizeof(struct inet_diag_req);
 
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+}
+
+static inline int inet_diag_type2proto(int type)
+{
+       switch (type) {
+       case TCPDIAG_GETSOCK:
+               return IPPROTO_TCP;
+       case DCCPDIAG_GETSOCK:
+               return IPPROTO_DCCP;
+       default:
+               return 0;
+       }
+}
+
+static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
+       struct inet_diag_req req;
+       struct nlattr *bc = NULL;
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
+       req.sdiag_family = AF_UNSPEC; /* compatibility */
+       req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, &req, bc);
+}
+
+static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
+       struct inet_diag_req req;
+
+       req.sdiag_family = rc->idiag_family;
+       req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       return inet_diag_get_exact(in_skb, nlh, &req);
+}
+
+static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
        if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
            nlmsg_len(nlh) < hdrlen)
                return -EINVAL;
@@ -873,28 +961,54 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                return -EINVAL;
                }
 
-               return netlink_dump_start(idiagnl, skb, nlh,
-                                         inet_diag_dump, NULL, 0);
+               return netlink_dump_start(sock_diag_nlsk, skb, nlh,
+                                         inet_diag_dump_compat, NULL, 0);
        }
 
-       return inet_diag_get_exact(skb, nlh);
+       return inet_diag_get_exact_compat(skb, nlh);
 }
 
-static DEFINE_MUTEX(inet_diag_mutex);
-
-static void inet_diag_rcv(struct sk_buff *skb)
+static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
-       mutex_lock(&inet_diag_mutex);
-       netlink_rcv_skb(skb, &inet_diag_rcv_msg);
-       mutex_unlock(&inet_diag_mutex);
+       int hdrlen = sizeof(struct inet_diag_req);
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               if (nlmsg_attrlen(h, hdrlen)) {
+                       struct nlattr *attr;
+                       attr = nlmsg_find_attr(h, hdrlen,
+                                              INET_DIAG_REQ_BYTECODE);
+                       if (attr == NULL ||
+                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+                               return -EINVAL;
+               }
+
+               return netlink_dump_start(sock_diag_nlsk, skb, h,
+                                         inet_diag_dump, NULL, 0);
+       }
+
+       return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
 }
 
+static struct sock_diag_handler inet_diag_handler = {
+       .family = AF_INET,
+       .dump = inet_diag_handler_dump,
+};
+
+static struct sock_diag_handler inet6_diag_handler = {
+       .family = AF_INET6,
+       .dump = inet_diag_handler_dump,
+};
+
 int inet_diag_register(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
        int err = -EINVAL;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                goto out;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -913,7 +1027,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                return;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -924,7 +1038,7 @@ EXPORT_SYMBOL_GPL(inet_diag_unregister);
 
 static int __init inet_diag_init(void)
 {
-       const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
+       const int inet_diag_table_size = (IPPROTO_MAX *
                                          sizeof(struct inet_diag_handler *));
        int err = -ENOMEM;
 
@@ -932,25 +1046,35 @@ static int __init inet_diag_init(void)
        if (!inet_diag_table)
                goto out;
 
-       idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
-                                       inet_diag_rcv, NULL, THIS_MODULE);
-       if (idiagnl == NULL)
-               goto out_free_table;
-       err = 0;
+       err = sock_diag_register(&inet_diag_handler);
+       if (err)
+               goto out_free_nl;
+
+       err = sock_diag_register(&inet6_diag_handler);
+       if (err)
+               goto out_free_inet;
+
+       sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
 out:
        return err;
-out_free_table:
+
+out_free_inet:
+       sock_diag_unregister(&inet_diag_handler);
+out_free_nl:
        kfree(inet_diag_table);
        goto out;
 }
 
 static void __exit inet_diag_exit(void)
 {
-       netlink_kernel_release(idiagnl);
+       sock_diag_unregister(&inet6_diag_handler);
+       sock_diag_unregister(&inet_diag_handler);
+       sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
        kfree(inet_diag_table);
 }
 
 module_init(inet_diag_init);
 module_exit(inet_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
index 3b34d1c862709e7bde3cd3665fad1e191692506d..29a07b6c7168f7369b13e25d8c96011c6118ec56 100644 (file)
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
        rt = skb_rtable(skb);
 
-       if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+       if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
                goto sr_failed;
 
        if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
index d55110e9312023bdba81a9643b727ada7be06fee..2b53a1f7abf6bf57509279cff831b7082040e558 100644 (file)
@@ -46,7 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/gre.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -171,7 +171,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
 {
@@ -729,9 +729,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
+                       struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
                        const struct in6_addr *addr6;
                        int addr_type;
 
@@ -799,7 +799,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -835,6 +835,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+               if (max_headroom > dev->needed_headroom)
+                       dev->needed_headroom = max_headroom;
                if (!new_skb) {
                        ip_rt_put(rt);
                        dev->stats.tx_dropped++;
@@ -873,7 +875,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
index ec93335901ddc45d09f7c85c53270ea9a6772207..1e60f7679075b660f489bcd1a9f462c832c5e77e 100644 (file)
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
                     ) {
                        if (srrptr + 3 > srrspace)
                                break;
-                       if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+                       if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                                break;
                }
                if (srrptr + 3 <= srrspace) {
                        opt->is_changed = 1;
                        ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+                       ip_hdr(skb)->daddr = opt->nexthop;
                        optptr[2] = srrptr+4;
                } else if (net_ratelimit())
                        printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,6 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        }
        if (srrptr <= srrspace) {
                opt->srr_is_hit = 1;
+               opt->nexthop = nexthop;
                opt->is_changed = 1;
        }
        return 0;
index 0bc95f3977d2aecff69f30c871f1b459588a34a7..ff302bde8890fb748b026e07d55b3ace325335d7 100644 (file)
@@ -206,7 +206,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
@@ -319,6 +319,20 @@ int ip_output(struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
+/*
+ * copy saddr and daddr, possibly using 64bit load/stores
+ * Equivalent to :
+ *   iph->saddr = fl4->saddr;
+ *   iph->daddr = fl4->daddr;
+ */
+static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
+{
+       BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
+                    offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
+       memcpy(&iph->saddr, &fl4->saddr,
+              sizeof(fl4->saddr) + sizeof(fl4->daddr));
+}
+
 int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 {
        struct sock *sk = skb->sk;
@@ -381,8 +395,8 @@ packet_routed:
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
-       iph->saddr    = fl4->saddr;
-       iph->daddr    = fl4->daddr;
+       ip_copy_addrs(iph, fl4);
+
        /* Transport layer set skb->h.foo itself. */
 
        if (inet_opt && inet_opt->opt.optlen) {
@@ -1337,8 +1351,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        ip_select_ident(iph, &rt->dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
-       iph->saddr = fl4->saddr;
-       iph->daddr = fl4->daddr;
+       ip_copy_addrs(iph, fl4);
 
        if (opt) {
                iph->ihl += opt->optlen>>2;
index 09ff51bf16a4c400d84f67caa7137de3678649d3..8aa87c19fa008f00adbcf8e0fa1b4411d119288b 100644 (file)
@@ -37,7 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 
 /*
  *     SOL_IP control messages.
  */
+#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
 
 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
 {
-       struct in_pktinfo info;
-       struct rtable *rt = skb_rtable(skb);
+       struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
 
        info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
-       if (rt) {
-               info.ipi_ifindex = rt->rt_iif;
-               info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
-       } else {
-               info.ipi_ifindex = 0;
-               info.ipi_spec_dst.s_addr = 0;
-       }
 
        put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
 }
@@ -515,7 +508,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                                sock_owned_by_user(sk));
                if (inet->is_icsk) {
                        struct inet_connection_sock *icsk = inet_csk(sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        if (sk->sk_family == PF_INET ||
                            (!((1 << sk->sk_state) &
                               (TCPF_LISTEN | TCPF_CLOSE)) &&
@@ -526,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                if (opt)
                                        icsk->icsk_ext_hdr_len += opt->opt.optlen;
                                icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        }
 #endif
                }
@@ -992,20 +985,28 @@ e_inval:
 }
 
 /**
- * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * ipv4_pktinfo_prepare - transfert some info from rtable to skb
  * @sk: socket
  * @skb: buffer
  *
- * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
- * is not set, we drop skb dst entry now, while dst cache line is hot.
+ * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
+ * in skb->cb[] before dst drop.
+ * This way, receiver doesnt make cache line misses to read rtable.
  */
-int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(struct sk_buff *skb)
 {
-       if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
-               skb_dst_drop(skb);
-       return sock_queue_rcv_skb(sk, skb);
+       struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+       const struct rtable *rt = skb_rtable(skb);
+
+       if (rt) {
+               pktinfo->ipi_ifindex = rt->rt_iif;
+               pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+       } else {
+               pktinfo->ipi_ifindex = 0;
+               pktinfo->ipi_spec_dst.s_addr = 0;
+       }
+       skb_dst_drop(skb);
 }
-EXPORT_SYMBOL(ip_queue_rcv_skb);
 
 int ip_setsockopt(struct sock *sk, int level,
                int optname, char __user *optval, unsigned int optlen)
index 0da2afc97f32ffae2773098391aba0dcbd004903..7e4ec9fc2cef3c38bd0e659a8c36548ff25fe429 100644 (file)
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
@@ -763,13 +767,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
        struct sk_buff *skb;
        struct bootp_pkt *b;
        struct iphdr *h;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        /* Allocate packet */
-       skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+       skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
                        GFP_KERNEL);
        if (!skb)
                return;
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
        memset(b, 0, sizeof(struct bootp_pkt));
 
@@ -822,8 +828,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
        if (dev_hard_header(skb, dev, ntohs(skb->protocol),
-                           dev->broadcast, dev->dev_addr, skb->len) < 0 ||
-           dev_queue_xmit(skb) < 0)
+                           dev->broadcast, dev->dev_addr, skb->len) < 0) {
+               kfree_skb(skb);
+               printk("E");
+               return;
+       }
+
+       if (dev_queue_xmit(skb) < 0)
                printk("E");
 }
 
index 065effd8349a81689828927c84142a778aebb650..413ed1ba7a5a16976d80ca39fadad9ec719dd6e5 100644 (file)
@@ -148,7 +148,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip_get_stats(struct net_device *dev)
 {
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
        ipip_tunnel_link(ipn, nt);
        return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
+       struct ip_tunnel *t;
        int err;
 
        ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
        if ((err = register_netdev(ipn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(ipn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 76a7f07b38b6edb13b48118c583ad754b96e1b11..8e54490ee3f490d6c07c99e87481b3a1dd395744 100644 (file)
@@ -1520,7 +1520,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;
-       LIST_HEAD(list);
 
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
@@ -1529,10 +1528,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
-                               vif_delete(mrt, ct, 1, &list);
+                               vif_delete(mrt, ct, 1, NULL);
                }
        }
-       unregister_netdevice_many(&list);
        return NOTIFY_DONE;
 }
 
index 9899619ab9b8db0f9d8d02c8005c0e6bb01fda94..4f47e064e262c2f24e7cb13eacfcebff0fad86a3 100644 (file)
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
-           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+           pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                               0, GFP_ATOMIC))
                return -1;
 
        return 0;
index 1dfc18a03fd4219fe9dd419011877d0a7ce3dfa7..74dfc9e5211fa70827b3411dd6c5bceb5773dffa 100644 (file)
@@ -27,7 +27,7 @@ config NF_CONNTRACK_IPV4
 
 config NF_CONNTRACK_PROC_COMPAT
        bool "proc/sysctl compatibility with old connection tracking"
-       depends on NF_CONNTRACK_IPV4
+       depends on NF_CONNTRACK_PROCFS && NF_CONNTRACK_IPV4
        default y
        help
          This option enables /proc and sysctl compatibility with the old
@@ -76,11 +76,21 @@ config IP_NF_MATCH_AH
 config IP_NF_MATCH_ECN
        tristate '"ecn" match support'
        depends on NETFILTER_ADVANCED
-       help
-         This option adds a `ECN' match, which allows you to match against
-         the IPv4 and TCP header ECN fields.
+       select NETFILTER_XT_MATCH_ECN
+       ---help---
+       This is a backwards-compat option for the user's convenience
+       (e.g. when running oldconfig). It selects
+       CONFIG_NETFILTER_XT_MATCH_ECN.
+
+config IP_NF_MATCH_RPFILTER
+       tristate '"rpfilter" reverse path filter match support'
+       depends on NETFILTER_ADVANCED
+       ---help---
+         This option allows you to match packets whose replies would
+         go out via the interface the packet came in.
 
          To compile it as a module, choose M here.  If unsure, say N.
+         The module will be called ipt_rpfilter.
 
 config IP_NF_MATCH_TTL
        tristate '"ttl" match support'
@@ -325,7 +335,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
        tristate  'raw table support (required for NOTRACK/TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to iptables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index dca2082ec68339cc5fce2af9b6fbbe296298c11c..213a462b739bbd937ed511485db824aa94e7d26c 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
 # matches
 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
-obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
index e59aabd0eae4f9a63559672914db90d5a54c97b6..a057fe64debde83a8b2fe1d29a78d7eb00954e8b 100644 (file)
@@ -404,6 +404,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
        int status, type, pid, flags;
        unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
+       bool enable_timestamp = false;
 
        skblen = skb->len;
        if (skblen < sizeof(*nlh))
@@ -441,12 +442,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
-               net_enable_timestamp();
+               enable_timestamp = true;
                peer_pid = pid;
        }
 
        spin_unlock_bh(&queue_lock);
-
+       if (enable_timestamp)
+               net_enable_timestamp();
        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
index 9931152a78b54e4512bc95bbbe720258ebb44d09..2f210c79dc876c069228e07dda15655e372e5468 100644 (file)
@@ -30,9 +30,9 @@ MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
 /* FIXME: Multiple targets. --RR */
 static int masquerade_tg_check(const struct xt_tgchk_param *par)
 {
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-       if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
                pr_debug("bad MAP_IPS.\n");
                return -EINVAL;
        }
@@ -49,8 +49,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        struct nf_conn *ct;
        struct nf_conn_nat *nat;
        enum ip_conntrack_info ctinfo;
-       struct nf_nat_range newrange;
-       const struct nf_nat_multi_range_compat *mr;
+       struct nf_nat_ipv4_range newrange;
+       const struct nf_nat_ipv4_multi_range_compat *mr;
        const struct rtable *rt;
        __be32 newsrc;
 
@@ -79,13 +79,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        nat->masq_index = par->out->ifindex;
 
        /* Transfer from original range. */
-       newrange = ((struct nf_nat_range)
-               { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+       newrange = ((struct nf_nat_ipv4_range)
+               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
                  newsrc, newsrc,
                  mr->range[0].min, mr->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
 }
 
 static int
@@ -139,7 +139,7 @@ static struct xt_target masquerade_tg_reg __read_mostly = {
        .name           = "MASQUERADE",
        .family         = NFPROTO_IPV4,
        .target         = masquerade_tg,
-       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
        .table          = "nat",
        .hooks          = 1 << NF_INET_POST_ROUTING,
        .checkentry     = masquerade_tg_check,
index 6cdb298f103570bcf3f1b36bee23c99b32dec8a9..b5bfbbabf70d3d031a3689ec94895ab9229e91af 100644 (file)
@@ -24,9 +24,9 @@ MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
 
 static int netmap_tg_check(const struct xt_tgchk_param *par)
 {
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-       if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
+       if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
                pr_debug("bad MAP_IPS.\n");
                return -EINVAL;
        }
@@ -43,8 +43,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        __be32 new_ip, netmask;
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_range newrange;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_ipv4_range newrange;
 
        NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
                     par->hooknum == NF_INET_POST_ROUTING ||
@@ -61,8 +61,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
                new_ip = ip_hdr(skb)->saddr & ~netmask;
        new_ip |= mr->range[0].min_ip & netmask;
 
-       newrange = ((struct nf_nat_range)
-               { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+       newrange = ((struct nf_nat_ipv4_range)
+               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
                  new_ip, new_ip,
                  mr->range[0].min, mr->range[0].max });
 
@@ -74,7 +74,7 @@ static struct xt_target netmap_tg_reg __read_mostly = {
        .name           = "NETMAP",
        .family         = NFPROTO_IPV4,
        .target         = netmap_tg,
-       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_INET_PRE_ROUTING) |
                          (1 << NF_INET_POST_ROUTING) |
index 18a0656505a02046099219b0563a1617bce321dd..7c0103a5203e2bca7f3e103616e478793a18185e 100644 (file)
@@ -28,9 +28,9 @@ MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
 /* FIXME: Take multiple ranges --RR */
 static int redirect_tg_check(const struct xt_tgchk_param *par)
 {
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-       if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
                pr_debug("bad MAP_IPS.\n");
                return -EINVAL;
        }
@@ -47,8 +47,8 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        __be32 newdst;
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_range newrange;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_ipv4_range newrange;
 
        NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
                     par->hooknum == NF_INET_LOCAL_OUT);
@@ -76,20 +76,20 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
        }
 
        /* Transfer from original range. */
-       newrange = ((struct nf_nat_range)
-               { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+       newrange = ((struct nf_nat_ipv4_range)
+               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
                  newdst, newdst,
                  mr->range[0].min, mr->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_DST);
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
 }
 
 static struct xt_target redirect_tg_reg __read_mostly = {
        .name           = "REDIRECT",
        .family         = NFPROTO_IPV4,
        .target         = redirect_tg,
-       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
        .checkentry     = redirect_tg_check,
index b5508151e5476a23d3285b2bf919dba3132bc322..ba5756d20165ec0d5bb6d41359e219e03d9bdb8d 100644 (file)
@@ -65,7 +65,7 @@ static unsigned int flushtimeout = 10;
 module_param(flushtimeout, uint, 0600);
 MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
 
-static int nflog = 1;
+static bool nflog = true;
 module_param(nflog, bool, 0400);
 MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
 
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
deleted file mode 100644 (file)
index 2b57e52..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/* IP tables module for matching the value of the IPv4 and TCP ECN bits
- *
- * (C) 2002 by Harald Welte <laforge@gnumonks.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_ecn.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match for IPv4");
-MODULE_LICENSE("GPL");
-
-static inline bool match_ip(const struct sk_buff *skb,
-                           const struct ipt_ecn_info *einfo)
-{
-       return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
-              !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
-}
-
-static inline bool match_tcp(const struct sk_buff *skb,
-                            const struct ipt_ecn_info *einfo,
-                            bool *hotdrop)
-{
-       struct tcphdr _tcph;
-       const struct tcphdr *th;
-
-       /* In practice, TCP match does this, so can't fail.  But let's
-        * be good citizens.
-        */
-       th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               *hotdrop = false;
-               return false;
-       }
-
-       if (einfo->operation & IPT_ECN_OP_MATCH_ECE) {
-               if (einfo->invert & IPT_ECN_OP_MATCH_ECE) {
-                       if (th->ece == 1)
-                               return false;
-               } else {
-                       if (th->ece == 0)
-                               return false;
-               }
-       }
-
-       if (einfo->operation & IPT_ECN_OP_MATCH_CWR) {
-               if (einfo->invert & IPT_ECN_OP_MATCH_CWR) {
-                       if (th->cwr == 1)
-                               return false;
-               } else {
-                       if (th->cwr == 0)
-                               return false;
-               }
-       }
-
-       return true;
-}
-
-static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       const struct ipt_ecn_info *info = par->matchinfo;
-
-       if (info->operation & IPT_ECN_OP_MATCH_IP)
-               if (!match_ip(skb, info))
-                       return false;
-
-       if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-               if (!match_tcp(skb, info, &par->hotdrop))
-                       return false;
-       }
-
-       return true;
-}
-
-static int ecn_mt_check(const struct xt_mtchk_param *par)
-{
-       const struct ipt_ecn_info *info = par->matchinfo;
-       const struct ipt_ip *ip = par->entryinfo;
-
-       if (info->operation & IPT_ECN_OP_MATCH_MASK)
-               return -EINVAL;
-
-       if (info->invert & IPT_ECN_OP_MATCH_MASK)
-               return -EINVAL;
-
-       if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-           (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
-               pr_info("cannot match TCP bits in rule for non-tcp packets\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static struct xt_match ecn_mt_reg __read_mostly = {
-       .name           = "ecn",
-       .family         = NFPROTO_IPV4,
-       .match          = ecn_mt,
-       .matchsize      = sizeof(struct ipt_ecn_info),
-       .checkentry     = ecn_mt_check,
-       .me             = THIS_MODULE,
-};
-
-static int __init ecn_mt_init(void)
-{
-       return xt_register_match(&ecn_mt_reg);
-}
-
-static void __exit ecn_mt_exit(void)
-{
-       xt_unregister_match(&ecn_mt_reg);
-}
-
-module_init(ecn_mt_init);
-module_exit(ecn_mt_exit);
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
new file mode 100644 (file)
index 0000000..31371be
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match");
+
+/* don't try to find route from mcast/bcast/zeronet */
+static __be32 rpfilter_get_saddr(__be32 addr)
+{
+       if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
+           ipv4_is_zeronet(addr))
+               return 0;
+       return addr;
+}
+
+static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
+                               const struct net_device *dev, u8 flags)
+{
+       struct fib_result res;
+       bool dev_match;
+       struct net *net = dev_net(dev);
+       int ret __maybe_unused;
+
+       if (fib_lookup(net, fl4, &res))
+               return false;
+
+       if (res.type != RTN_UNICAST) {
+               if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
+                       return false;
+       }
+       dev_match = false;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+       for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+               struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+               if (nh->nh_dev == dev) {
+                       dev_match = true;
+                       break;
+               }
+       }
+#else
+       if (FIB_RES_DEV(res) == dev)
+               dev_match = true;
+#endif
+       if (dev_match || flags & XT_RPFILTER_LOOSE)
+               return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
+       return dev_match;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_rpfilter_info *info;
+       const struct iphdr *iph;
+       struct flowi4 flow;
+       bool invert;
+
+       info = par->matchinfo;
+       invert = info->flags & XT_RPFILTER_INVERT;
+
+       if (par->in->flags & IFF_LOOPBACK)
+               return true ^ invert;
+
+       iph = ip_hdr(skb);
+       if (ipv4_is_multicast(iph->daddr)) {
+               if (ipv4_is_zeronet(iph->saddr))
+                       return ipv4_is_local_multicast(iph->daddr) ^ invert;
+               flow.flowi4_iif = 0;
+       } else {
+               flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+       }
+
+       flow.daddr = iph->saddr;
+       flow.saddr = rpfilter_get_saddr(iph->daddr);
+       flow.flowi4_oif = 0;
+       flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+       flow.flowi4_tos = RT_TOS(iph->tos);
+       flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+
+       return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+       const struct xt_rpfilter_info *info = par->matchinfo;
+       unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+       if (info->flags & options) {
+               pr_info("unknown options encountered");
+               return -EINVAL;
+       }
+
+       if (strcmp(par->table, "mangle") != 0 &&
+           strcmp(par->table, "raw") != 0) {
+               pr_info("match only valid in the \'raw\' "
+                       "or \'mangle\' tables, not \'%s\'.\n", par->table);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+       .name           = "rpfilter",
+       .family         = NFPROTO_IPV4,
+       .checkentry     = rpfilter_check,
+       .match          = rpfilter_mt,
+       .matchsize      = sizeof(struct xt_rpfilter_info),
+       .hooks          = (1 << NF_INET_PRE_ROUTING),
+       .me             = THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+       return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+       xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
index c37641e819f2e9025854b95edc8de145831cb70a..0e58f09e59fb345501123159952027f8f854e539 100644 (file)
@@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init iptable_filter_net_init(struct net *net)
index 447bc5cfdc6c83703215db60a9ea631ef46774c8..acdd002bb5405876522223a8e979ff4daf2d1c81 100644 (file)
@@ -30,7 +30,6 @@
 #include <net/netfilter/nf_nat_helper.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
 static DEFINE_SPINLOCK(nf_nat_lock);
@@ -57,7 +56,7 @@ hash_by_src(const struct net *net, u16 zone,
        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all ^ zone,
-                           tuple->dst.protonum, 0);
+                           tuple->dst.protonum, nf_conntrack_hash_rnd);
        return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
@@ -82,14 +81,14 @@ EXPORT_SYMBOL(nf_nat_used_tuple);
  * that meet the constraints of range. */
 static int
 in_range(const struct nf_conntrack_tuple *tuple,
-        const struct nf_nat_range *range)
+        const struct nf_nat_ipv4_range *range)
 {
        const struct nf_nat_protocol *proto;
        int ret = 0;
 
        /* If we are supposed to map IPs, then we must be in the
           range specified, otherwise let this drag us onto a new src IP. */
-       if (range->flags & IP_NAT_RANGE_MAP_IPS) {
+       if (range->flags & NF_NAT_RANGE_MAP_IPS) {
                if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
                    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
                        return 0;
@@ -97,8 +96,8 @@ in_range(const struct nf_conntrack_tuple *tuple,
 
        rcu_read_lock();
        proto = __nf_nat_proto_find(tuple->dst.protonum);
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
-           proto->in_range(tuple, IP_NAT_MANIP_SRC,
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
+           proto->in_range(tuple, NF_NAT_MANIP_SRC,
                            &range->min, &range->max))
                ret = 1;
        rcu_read_unlock();
@@ -123,7 +122,7 @@ static int
 find_appropriate_src(struct net *net, u16 zone,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
-                    const struct nf_nat_range *range)
+                    const struct nf_nat_ipv4_range *range)
 {
        unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn_nat *nat;
@@ -157,7 +156,7 @@ find_appropriate_src(struct net *net, u16 zone,
 */
 static void
 find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
-                   const struct nf_nat_range *range,
+                   const struct nf_nat_ipv4_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
 {
@@ -166,10 +165,10 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
        u_int32_t minip, maxip, j;
 
        /* No IP mapping?  Do nothing. */
-       if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
+       if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;
 
-       if (maniptype == IP_NAT_MANIP_SRC)
+       if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3.ip;
        else
                var_ipp = &tuple->dst.u3.ip;
@@ -189,7 +188,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
        minip = ntohl(range->min_ip);
        maxip = ntohl(range->max_ip);
        j = jhash_2words((__force u32)tuple->src.u3.ip,
-                        range->flags & IP_NAT_RANGE_PERSISTENT ?
+                        range->flags & NF_NAT_RANGE_PERSISTENT ?
                                0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
        j = ((u64)j * (maxip - minip + 1)) >> 32;
        *var_ipp = htonl(minip + j);
@@ -204,7 +203,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
 static void
 get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
-                const struct nf_nat_range *range,
+                const struct nf_nat_ipv4_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
 {
@@ -219,8 +218,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
           This is only required for source (ie. NAT/masq) mappings.
           So far, we don't do local source mappings, so multiple
           manips not an issue.  */
-       if (maniptype == IP_NAT_MANIP_SRC &&
-           !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
+       if (maniptype == NF_NAT_MANIP_SRC &&
+           !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
                /* try the original tuple first */
                if (in_range(orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -247,8 +246,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
        proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
        /* Only bother mapping if it's not already in range and unique */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-               if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) {
+       if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+               if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (proto->in_range(tuple, maniptype, &range->min,
                                            &range->max) &&
                            (range->min.all == range->max.all ||
@@ -267,7 +266,7 @@ out:
 
 unsigned int
 nf_nat_setup_info(struct nf_conn *ct,
-                 const struct nf_nat_range *range,
+                 const struct nf_nat_ipv4_range *range,
                  enum nf_nat_manip_type maniptype)
 {
        struct net *net = nf_ct_net(ct);
@@ -284,8 +283,8 @@ nf_nat_setup_info(struct nf_conn *ct,
                }
        }
 
-       NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
-                    maniptype == IP_NAT_MANIP_DST);
+       NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
+                    maniptype == NF_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));
 
        /* What we've got will look like inverse of reply. Normally
@@ -306,13 +305,13 @@ nf_nat_setup_info(struct nf_conn *ct,
                nf_conntrack_alter_reply(ct, &reply);
 
                /* Non-atomic: we own this at the moment. */
-               if (maniptype == IP_NAT_MANIP_SRC)
+               if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;
        }
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
 
                srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -327,7 +326,7 @@ nf_nat_setup_info(struct nf_conn *ct,
        }
 
        /* It's done. */
-       if (maniptype == IP_NAT_MANIP_DST)
+       if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;
@@ -361,7 +360,7 @@ manip_pkt(u_int16_t proto,
 
        iph = (void *)skb->data + iphdroff;
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
                iph->saddr = target->src.u3.ip;
        } else {
@@ -381,7 +380,7 @@ unsigned int nf_nat_packet(struct nf_conn *ct,
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
 
-       if (mtype == IP_NAT_MANIP_SRC)
+       if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;
@@ -414,8 +413,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                struct icmphdr icmp;
                struct iphdr ip;
        } *inside;
-       const struct nf_conntrack_l4proto *l4proto;
-       struct nf_conntrack_tuple inner, target;
+       struct nf_conntrack_tuple target;
        int hdrlen = ip_hdrlen(skb);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
@@ -447,7 +445,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                        return 0;
        }
 
-       if (manip == IP_NAT_MANIP_SRC)
+       if (manip == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;
@@ -463,16 +461,6 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                 "dir %s\n", skb, manip,
                 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
 
-       /* rcu_read_lock()ed by nf_hook_slow */
-       l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
-
-       if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
-                            (hdrlen +
-                             sizeof(struct icmphdr) + inside->ip.ihl * 4),
-                            (u_int16_t)AF_INET, inside->ip.protocol,
-                            &inner, l3proto, l4proto))
-               return 0;
-
        /* Change inner back to look like incoming packet.  We do the
           opposite manip on this hook to normal, because it might not
           pass all hooks (locally-generated ICMP).  Consider incoming
@@ -575,26 +563,6 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-       const struct nf_nat_protocol *p;
-
-       rcu_read_lock();
-       p = __nf_nat_proto_find(protonum);
-       if (!try_module_get(p->me))
-               p = &nf_nat_unknown_protocol;
-       rcu_read_unlock();
-
-       return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-       module_put(p->me);
-}
-
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
@@ -602,7 +570,7 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 
 static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
-                                    struct nf_nat_range *range)
+                                    struct nf_nat_ipv4_range *range)
 {
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_protocol *npt;
@@ -612,21 +580,23 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
        if (err < 0)
                return err;
 
-       npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
+       rcu_read_lock();
+       npt = __nf_nat_proto_find(nf_ct_protonum(ct));
        if (npt->nlattr_to_range)
                err = npt->nlattr_to_range(tb, range);
-       nf_nat_proto_put(npt);
+       rcu_read_unlock();
        return err;
 }
 
 static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_MINIP]         = { .type = NLA_U32 },
        [CTA_NAT_MAXIP]         = { .type = NLA_U32 },
+       [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
 };
 
 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-                   const struct nf_conn *ct, struct nf_nat_range *range)
+                   const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
 {
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;
@@ -646,7 +616,7 @@ nfnetlink_parse_nat(const struct nlattr *nat,
                range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);
 
        if (range->min_ip)
-               range->flags |= IP_NAT_RANGE_MAP_IPS;
+               range->flags |= NF_NAT_RANGE_MAP_IPS;
 
        if (!tb[CTA_NAT_PROTO])
                return 0;
@@ -663,7 +633,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
 {
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        if (nfnetlink_parse_nat(attr, ct, &range) < 0)
                return -EINVAL;
index b9a1136addbdbca5bf6f95d10bcb789e2cb4036d..dc1dd912baf4689df151a0756425f6bde4151cd4 100644 (file)
@@ -398,7 +398,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_q931_expect(struct nf_conn *new,
                               struct nf_conntrack_expect *this)
 {
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        if (this->tuple.src.u3.ip != 0) {       /* Only accept calls from GK */
                nf_nat_follow_master(new, this);
@@ -409,16 +409,16 @@ static void ip_nat_q931_expect(struct nf_conn *new,
        BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
+       range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
-       nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+       nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
        range.min = range.max = this->saved_proto;
        range.min_ip = range.max_ip =
            new->master->tuplehash[!this->dir].tuple.src.u3.ip;
-       nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+       nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
@@ -496,21 +496,21 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_callforwarding_expect(struct nf_conn *new,
                                         struct nf_conntrack_expect *this)
 {
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        /* This must be a fresh one. */
        BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
+       range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
-       nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+       nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
        range.min = range.max = this->saved_proto;
        range.min_ip = range.max_ip = this->saved_ip;
-       nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+       nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
index ebc5f8894f99eb06b8ef5616cafc06b427dd6a42..af65958f630877b2c92150b77ed5b82a5d7567be 100644 (file)
@@ -253,12 +253,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
        struct udphdr *udph;
        int datalen, oldlen;
 
-       /* UDP helpers might accidentally mangle the wrong packet */
-       iph = ip_hdr(skb);
-       if (skb->len < iph->ihl*4 + sizeof(*udph) +
-                              match_offset + match_len)
-               return 0;
-
        if (!skb_make_writable(skb, skb->len))
                return 0;
 
@@ -430,22 +424,22 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 void nf_nat_follow_master(struct nf_conn *ct,
                          struct nf_conntrack_expect *exp)
 {
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        /* This must be a fresh one. */
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
+       range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_ip = range.max_ip
                = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-       nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
        range.min = range.max = exp->saved_proto;
        range.min_ip = range.max_ip
                = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
-       nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
 EXPORT_SYMBOL(nf_nat_follow_master);
index 3e8284ba46b8eb6274f7aa84f2b314142e31b837..c273d58980ae3d9ced661297fc7ff380c6761546 100644 (file)
@@ -47,7 +47,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
        struct nf_conntrack_tuple t;
        const struct nf_ct_pptp_master *ct_pptp_info;
        const struct nf_nat_pptp *nat_pptp_info;
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
        nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -88,24 +88,24 @@ static void pptp_nat_expected(struct nf_conn *ct,
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
+       range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_ip = range.max_ip
                = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
        if (exp->dir == IP_CT_DIR_ORIGINAL) {
-               range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+               range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
                range.min = range.max = exp->saved_proto;
        }
-       nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
+       range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_ip = range.max_ip
                = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
        if (exp->dir == IP_CT_DIR_REPLY) {
-               range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+               range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
                range.min = range.max = exp->saved_proto;
        }
-       nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
 
 /* outbound packets == from PNS to PAC */
index a3d997618602496867a7456b8205c80585d2ec75..9993bc93e102d562b3f7c12c5dc79f388608454c 100644 (file)
@@ -26,7 +26,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
 {
        __be16 port;
 
-       if (maniptype == IP_NAT_MANIP_SRC)
+       if (maniptype == NF_NAT_MANIP_SRC)
                port = tuple->src.u.all;
        else
                port = tuple->dst.u.all;
@@ -37,7 +37,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
 
 void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                              const struct nf_nat_range *range,
+                              const struct nf_nat_ipv4_range *range,
                               enum nf_nat_manip_type maniptype,
                               const struct nf_conn *ct,
                               u_int16_t *rover)
@@ -46,15 +46,15 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        __be16 *portptr;
        u_int16_t off;
 
-       if (maniptype == IP_NAT_MANIP_SRC)
+       if (maniptype == NF_NAT_MANIP_SRC)
                portptr = &tuple->src.u.all;
        else
                portptr = &tuple->dst.u.all;
 
        /* If no range specified... */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                /* If it's dst rewrite, can't change port */
-               if (maniptype == IP_NAT_MANIP_DST)
+               if (maniptype == NF_NAT_MANIP_DST)
                        return;
 
                if (ntohs(*portptr) < 1024) {
@@ -75,9 +75,9 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                range_size = ntohs(range->max.all) - min + 1;
        }
 
-       if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+       if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
                off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
-                                                maniptype == IP_NAT_MANIP_SRC
+                                                maniptype == NF_NAT_MANIP_SRC
                                                 ? tuple->dst.u.all
                                                 : tuple->src.u.all);
        else
@@ -87,7 +87,7 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                *portptr = htons(min + off % range_size);
                if (++i != range_size && nf_nat_used_tuple(tuple, ct))
                        continue;
-               if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
+               if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
                        *rover = off;
                return;
        }
@@ -96,31 +96,19 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
 
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
-                                const struct nf_nat_range *range)
-{
-       NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
-       NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
-
 int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                struct nf_nat_range *range)
+                                struct nf_nat_ipv4_range *range)
 {
        if (tb[CTA_PROTONAT_PORT_MIN]) {
                range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
                range->max.all = range->min.tcp.port;
-               range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        if (tb[CTA_PROTONAT_PORT_MAX]) {
                range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
-               range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
+EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
 #endif
index 570faf2667b26e70f3f7ddc6e2f9c89cdbfdcfd9..3f67138d187cb25080900bf75351ed936fdbeeec 100644 (file)
@@ -24,7 +24,7 @@ static u_int16_t dccp_port_rover;
 
 static void
 dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_range *range,
+                 const struct nf_nat_ipv4_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
@@ -54,7 +54,7 @@ dccp_manip_pkt(struct sk_buff *skb,
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct dccp_hdr *)(skb->data + hdroff);
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
                newport = tuple->src.u.dccp.port;
@@ -80,12 +80,10 @@ dccp_manip_pkt(struct sk_buff *skb,
 
 static const struct nf_nat_protocol nf_nat_protocol_dccp = {
        .protonum               = IPPROTO_DCCP,
-       .me                     = THIS_MODULE,
        .manip_pkt              = dccp_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = dccp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index bc8d83a31c73ae4abe371e74c7b0df3e6068c03a..46ba0b9ab985b70ac5c80883a642d3048ac3cf83 100644 (file)
@@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 /* generate unique tuple ... */
 static void
 gre_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_range *range,
+                const struct nf_nat_ipv4_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
@@ -52,12 +52,12 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
        if (!ct->master)
                return;
 
-       if (maniptype == IP_NAT_MANIP_SRC)
+       if (maniptype == NF_NAT_MANIP_SRC)
                keyptr = &tuple->src.u.gre.key;
        else
                keyptr = &tuple->dst.u.gre.key;
 
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                pr_debug("%p: NATing GRE PPTP\n", ct);
                min = 1;
                range_size = 0xffff;
@@ -99,7 +99,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
 
        /* we only have destination manip of a packet, since 'source key'
         * is not present in the packet itself */
-       if (maniptype != IP_NAT_MANIP_DST)
+       if (maniptype != NF_NAT_MANIP_DST)
                return true;
        switch (greh->version) {
        case GRE_VERSION_1701:
@@ -119,12 +119,10 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
 
 static const struct nf_nat_protocol gre = {
        .protonum               = IPPROTO_GRE,
-       .me                     = THIS_MODULE,
        .manip_pkt              = gre_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = gre_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index 9f4dc1235dc7d3458616335c50a0e09eea72f08b..b35172851bae8b92094bff8dc0da833968e43f39 100644 (file)
@@ -30,7 +30,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
 
 static void
 icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_range *range,
+                 const struct nf_nat_ipv4_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
@@ -40,7 +40,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 
        range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
        /* If no range specified... */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
                range_size = 0xFFFF;
 
        for (i = 0; ; ++id) {
@@ -74,12 +74,10 @@ icmp_manip_pkt(struct sk_buff *skb,
 
 const struct nf_nat_protocol nf_nat_protocol_icmp = {
        .protonum               = IPPROTO_ICMP,
-       .me                     = THIS_MODULE,
        .manip_pkt              = icmp_manip_pkt,
        .in_range               = icmp_in_range,
        .unique_tuple           = icmp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index bd5a80a62a5b9f604463b1bf0fb6c31ca1ceb52b..3cce9b6c1c293c6c539cee0c3feef0c1930e2181 100644 (file)
@@ -19,7 +19,7 @@ static u_int16_t nf_sctp_port_rover;
 
 static void
 sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_range *range,
+                 const struct nf_nat_ipv4_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
@@ -46,7 +46,7 @@ sctp_manip_pkt(struct sk_buff *skb,
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct sctphdr *)(skb->data + hdroff);
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                /* Get rid of src ip and src pt */
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
@@ -70,12 +70,10 @@ sctp_manip_pkt(struct sk_buff *skb,
 
 static const struct nf_nat_protocol nf_nat_protocol_sctp = {
        .protonum               = IPPROTO_SCTP,
-       .me                     = THIS_MODULE,
        .manip_pkt              = sctp_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = sctp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index 0d67bb80130f019b48134fb675029b265d825f48..9fb4b4e72bbfeea34f894eeacbac20da9e01a396 100644 (file)
@@ -23,7 +23,7 @@ static u_int16_t tcp_port_rover;
 
 static void
 tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_range *range,
+                const struct nf_nat_ipv4_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
@@ -55,7 +55,7 @@ tcp_manip_pkt(struct sk_buff *skb,
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct tcphdr *)(skb->data + hdroff);
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                /* Get rid of src ip and src pt */
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
@@ -82,12 +82,10 @@ tcp_manip_pkt(struct sk_buff *skb,
 
 const struct nf_nat_protocol nf_nat_protocol_tcp = {
        .protonum               = IPPROTO_TCP,
-       .me                     = THIS_MODULE,
        .manip_pkt              = tcp_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = tcp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index 0b1b8601cba79465faa036f11ab301fbf0d36e80..9883336e628fd2174ec7b272ed55481a2a8b847d 100644 (file)
@@ -22,7 +22,7 @@ static u_int16_t udp_port_rover;
 
 static void
 udp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_range *range,
+                const struct nf_nat_ipv4_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
@@ -47,7 +47,7 @@ udp_manip_pkt(struct sk_buff *skb,
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct udphdr *)(skb->data + hdroff);
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                /* Get rid of src ip and src pt */
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
@@ -73,12 +73,10 @@ udp_manip_pkt(struct sk_buff *skb,
 
 const struct nf_nat_protocol nf_nat_protocol_udp = {
        .protonum               = IPPROTO_UDP,
-       .me                     = THIS_MODULE,
        .manip_pkt              = udp_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = udp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index f83ef23e2ab708d946f54586024915b1ed489c35..d24d10a7beb2ac50bb16bbe7968e8c7b567bbb34 100644 (file)
@@ -21,7 +21,7 @@ static u_int16_t udplite_port_rover;
 
 static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
-                    const struct nf_nat_range *range,
+                    const struct nf_nat_ipv4_range *range,
                     enum nf_nat_manip_type maniptype,
                     const struct nf_conn *ct)
 {
@@ -47,7 +47,7 @@ udplite_manip_pkt(struct sk_buff *skb,
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct udphdr *)(skb->data + hdroff);
 
-       if (maniptype == IP_NAT_MANIP_SRC) {
+       if (maniptype == NF_NAT_MANIP_SRC) {
                /* Get rid of src ip and src pt */
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
@@ -72,12 +72,10 @@ udplite_manip_pkt(struct sk_buff *skb,
 
 static const struct nf_nat_protocol nf_nat_protocol_udplite = {
        .protonum               = IPPROTO_UDPLITE,
-       .me                     = THIS_MODULE,
        .manip_pkt              = udplite_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = udplite_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .range_to_nlattr        = nf_nat_proto_range_to_nlattr,
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
 #endif
 };
index a50f2bc1c7328805e755a204ca66253844dba702..e0afe8112b1c20bccd18ad20dd1634d0e64f240d 100644 (file)
@@ -27,7 +27,7 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
 }
 
 static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                const struct nf_nat_range *range,
+                                const struct nf_nat_ipv4_range *range,
                                 enum nf_nat_manip_type maniptype,
                                 const struct nf_conn *ct)
 {
@@ -46,7 +46,6 @@ unknown_manip_pkt(struct sk_buff *skb,
 }
 
 const struct nf_nat_protocol nf_nat_unknown_protocol = {
-       /* .me isn't set: getting a ref to this cannot fail. */
        .manip_pkt              = unknown_manip_pkt,
        .in_range               = unknown_in_range,
        .unique_tuple           = unknown_unique_tuple,
index 733c9abc1cbd9ddb7c45a0b7ffae9d72f608ae62..d2a9dc314e0ebb8d4edf2e6272b5d833322ff15d 100644 (file)
@@ -44,7 +44,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
        NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
                     par->hooknum == NF_INET_LOCAL_IN);
@@ -56,7 +56,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
                            ctinfo == IP_CT_RELATED_REPLY));
        NF_CT_ASSERT(par->out != NULL);
 
-       return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
+       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
 }
 
 static unsigned int
@@ -64,7 +64,7 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
        NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
                     par->hooknum == NF_INET_LOCAL_OUT);
@@ -74,12 +74,12 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
        /* Connection must be valid and new. */
        NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
 
-       return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
+       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
 }
 
 static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
 {
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
        /* Must be a valid range */
        if (mr->rangesize != 1) {
@@ -91,7 +91,7 @@ static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
 
 static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
 {
-       const struct nf_nat_multi_range_compat *mr = par->targinfo;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
        /* Must be a valid range */
        if (mr->rangesize != 1) {
@@ -105,13 +105,13 @@ static unsigned int
 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 {
        /* Force range to this IP; let proto decide mapping for
-          per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+          per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
        */
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        range.flags = 0;
        pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
-                HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC ?
+                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
                 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
                 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
 
@@ -140,7 +140,7 @@ int nf_nat_rule_find(struct sk_buff *skb,
 static struct xt_target ipt_snat_reg __read_mostly = {
        .name           = "SNAT",
        .target         = ipt_snat_target,
-       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
        .checkentry     = ipt_snat_checkentry,
@@ -150,7 +150,7 @@ static struct xt_target ipt_snat_reg __read_mostly = {
 static struct xt_target ipt_dnat_reg __read_mostly = {
        .name           = "DNAT",
        .target         = ipt_dnat_target,
-       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
        .checkentry     = ipt_dnat_checkentry,
index 78844d9208f161d209f161af7d1509a691995dd1..d0319f96269fb88384dcf5a64205761589b2e791 100644 (file)
@@ -249,25 +249,25 @@ static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
 static void ip_nat_sip_expected(struct nf_conn *ct,
                                struct nf_conntrack_expect *exp)
 {
-       struct nf_nat_range range;
+       struct nf_nat_ipv4_range range;
 
        /* This must be a fresh one. */
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
        /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
        range.min = range.max = exp->saved_proto;
        range.min_ip = range.max_ip = exp->saved_ip;
-       nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 
        /* Change src to where master sends to, but only if the connection
         * actually came from the same source. */
        if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
            ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
-               range.flags = IP_NAT_RANGE_MAP_IPS;
+               range.flags = NF_NAT_RANGE_MAP_IPS;
                range.min_ip = range.max_ip
                        = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-               nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+               nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
        }
 }
 
index 92900482edea3c469168bcdeed43e9a8eb58dc54..3828a4229822951b8ad71839ac6241a9a2b19fbf 100644 (file)
@@ -137,7 +137,7 @@ nf_nat_fn(unsigned int hooknum,
                                return ret;
                } else
                        pr_debug("Already setup manip %s for ct %p\n",
-                                maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
+                                maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
                break;
 
index a06f73fdb3c014e793e50a9ba06363862b3c5eab..43d4c3b223699aee36de3c3c79c6d82b5236a4c0 100644 (file)
@@ -339,7 +339,6 @@ void ping_err(struct sk_buff *skb, u32 info)
        sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
                            ntohs(icmph->un.echo.id), skb->dev->ifindex);
        if (sk == NULL) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                pr_debug("no socket, dropping\n");
                return; /* No socket for error */
        }
@@ -679,7 +678,6 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
                inet_sk(sk), inet_sk(sk)->inet_num, skb);
        if (sock_queue_rcv_skb(sk, skb) < 0) {
-               ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_INERRORS);
                kfree_skb(skb);
                pr_debug("ping_queue_rcv_skb -> failed\n");
                return -1;
index 466ea8bb7a4d916e41c838389c7dcaf2b7f01b4a..3569d8ecaeac55e546912729322f1b15160d2c0c 100644 (file)
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 
        local_bh_disable();
        orphans = percpu_counter_sum_positive(&tcp_orphan_count);
-       sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+       sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
        local_bh_enable();
 
        socket_seq_show(seq);
        seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
                   sock_prot_inuse_get(net, &tcp_prot), orphans,
                   tcp_death_row.tw_count, sockets,
-                  atomic_long_read(&tcp_memory_allocated));
+                  proto_memory_allocated(&tcp_prot));
        seq_printf(seq, "UDP: inuse %d mem %ld\n",
                   sock_prot_inuse_get(net, &udp_prot),
-                  atomic_long_read(&udp_memory_allocated));
+                  proto_memory_allocated(&udp_prot));
        seq_printf(seq, "UDPLITE: inuse %d\n",
                   sock_prot_inuse_get(net, &udplite_prot));
        seq_printf(seq, "RAW: inuse %d\n",
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
 
        count = 0;
        for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-               val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+               val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
                if (val) {
                        type[count] = i;
                        vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
 {
        int i;
        struct net *net = seq->private;
+       atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
 
        seq_puts(seq, "\nIcmp: InMsgs InErrors");
        for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index));
+                          atomic_long_read(ptr + icmpmibmap[i].index));
        seq_printf(seq, " %lu %lu",
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index | 0x100));
+                          atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
 }
 
 /*
index 007e2eb769d33ede3afdb4e4fc69c3e9de6db1b4..3ccda5ae8a27b24855c86cf0f3e858c648fc5295 100644 (file)
@@ -292,7 +292,8 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
        /* Charge it to the socket. */
 
-       if (ip_queue_rcv_skb(sk, skb) < 0) {
+       ipv4_pktinfo_prepare(skb);
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -327,6 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        unsigned int iphlen;
        int err;
        struct rtable *rt = *rtp;
+       int hlen, tlen;
 
        if (length > rt->dst.dev->mtu) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -336,12 +338,14 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        if (flags&MSG_PROBE)
                goto out;
 
+       hlen = LL_RESERVED_SPACE(rt->dst.dev);
+       tlen = rt->dst.dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk,
-                                 length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+                                 length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto error;
-       skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+       skb_reserve(skb, hlen);
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
index 155138d8ec8bb9d7ef0e907b36b0725e0910394d..bcacf54e541879a01f8afe737e2cbbe13ce497c3 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
-#include <net/atmclip.h>
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+       ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU     0xFFF0
 
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -131,6 +132,10 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
+static int redirect_genid;
+
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
 
 /*
  *     Interface to generic destination cache.
@@ -138,7 +143,7 @@ static int rt_chain_length_max __read_mostly        = 20;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int     ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int     ipv4_mtu(const struct dst_entry *dst);
 static void             ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +198,7 @@ static struct dst_ops ipv4_dst_ops = {
        .gc =                   rt_garbage_collect,
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
-       .default_mtu =          ipv4_default_mtu,
+       .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
@@ -416,9 +421,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
        else {
                struct rtable *r = v;
                struct neighbour *n;
-               int len;
+               int len, HHUptod;
+
+               rcu_read_lock();
+               n = dst_get_neighbour_noref(&r->dst);
+               HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+               rcu_read_unlock();
 
-               n = dst_get_neighbour(&r->dst);
                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                        r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +441,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        -1,
-                       (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+                       HHUptod,
                        r->rt_spec_dst, &len);
 
                seq_printf(seq, "%*s\n", 127 - len, "");
@@ -825,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -837,6 +937,7 @@ static void rt_cache_invalidate(struct net *net)
 
        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+       redirect_genid++;
 }
 
 /*
@@ -1013,23 +1114,18 @@ static int slow_chain_length(const struct rtable *head)
 
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-       struct neigh_table *tbl = &arp_tbl;
        static const __be32 inaddr_any = 0;
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
        struct neighbour *n;
 
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-       if (dev->type == ARPHRD_ATM)
-               tbl = clip_tbl_hook;
-#endif
        if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
                pkey = &inaddr_any;
 
-       n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
+       n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
        if (n)
                return n;
-       return neigh_create(tbl, pkey, dev);
+       return neigh_create(&arp_tbl, pkey, dev);
 }
 
 static int rt_bind_neighbour(struct rtable *rt)
@@ -1265,7 +1361,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1276,7 +1372,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -1304,16 +1400,40 @@ static void rt_del(unsigned hash, struct rtable *rt)
        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       __be32 orig_gw = rt->rt_gateway;
+       struct neighbour *n, *old_n;
+
+       dst_confirm(&rt->dst);
+
+       rt->rt_gateway = peer->redirect_learned.a4;
+
+       n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
+       if (IS_ERR(n)) {
+               rt->rt_gateway = orig_gw;
+               return;
+       }
+       old_n = xchg(&rt->dst._neighbour, n);
+       if (old_n)
+               neigh_release(old_n);
+       if (!(n->nud_state & NUD_VALID)) {
+               neigh_event_send(n, NULL);
+       } else {
+               rt->rt_flags |= RTCF_REDIRECTED;
+               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
+       }
+}
+
 /* called in rcu_read_lock() section */
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
 {
        int s, i;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rt;
        __be32 skeys[2] = { saddr, 0 };
        int    ikeys[2] = { dev->ifindex, 0 };
-       struct flowi4 fl4;
        struct inet_peer *peer;
        struct net *net;
 
@@ -1336,33 +1456,44 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        goto reject_redirect;
        }
 
-       memset(&fl4, 0, sizeof(fl4));
-       fl4.daddr = daddr;
        for (s = 0; s < 2; s++) {
                for (i = 0; i < 2; i++) {
-                       fl4.flowi4_oif = ikeys[i];
-                       fl4.saddr = skeys[s];
-                       rt = __ip_route_output_key(net, &fl4);
-                       if (IS_ERR(rt))
-                               continue;
-
-                       if (rt->dst.error || rt->dst.dev != dev ||
-                           rt->rt_gateway != old_gw) {
-                               ip_rt_put(rt);
-                               continue;
-                       }
-
-                       if (!rt->peer)
-                               rt_bind_peer(rt, rt->rt_dst, 1);
+                       unsigned int hash;
+                       struct rtable __rcu **rthp;
+                       struct rtable *rt;
+
+                       hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
+
+                       rthp = &rt_hash_table[hash].chain;
+
+                       while ((rt = rcu_dereference(*rthp)) != NULL) {
+                               rthp = &rt->dst.rt_next;
+
+                               if (rt->rt_key_dst != daddr ||
+                                   rt->rt_key_src != skeys[s] ||
+                                   rt->rt_oif != ikeys[i] ||
+                                   rt_is_input_route(rt) ||
+                                   rt_is_expired(rt) ||
+                                   !net_eq(dev_net(rt->dst.dev), net) ||
+                                   rt->dst.error ||
+                                   rt->dst.dev != dev ||
+                                   rt->rt_gateway != old_gw)
+                                       continue;
 
-                       peer = rt->peer;
-                       if (peer) {
-                               peer->redirect_learned.a4 = new_gw;
-                               atomic_inc(&__rt_peer_genid);
+                               if (!rt->peer)
+                                       rt_bind_peer(rt, rt->rt_dst, 1);
+
+                               peer = rt->peer;
+                               if (peer) {
+                                       if (peer->redirect_learned.a4 != new_gw ||
+                                           peer->redirect_genid != redirect_genid) {
+                                               peer->redirect_learned.a4 = new_gw;
+                                               peer->redirect_genid = redirect_genid;
+                                               atomic_inc(&__rt_peer_genid);
+                                       }
+                                       check_peer_redir(&rt->dst, peer);
+                               }
                        }
-
-                       ip_rt_put(rt);
-                       return;
                }
        }
        return;
@@ -1649,40 +1780,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
        }
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
-{
-       struct rtable *rt = (struct rtable *) dst;
-       __be32 orig_gw = rt->rt_gateway;
-       struct neighbour *n, *old_n;
-
-       dst_confirm(&rt->dst);
-
-       rt->rt_gateway = peer->redirect_learned.a4;
-
-       n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-       if (IS_ERR(n))
-               return PTR_ERR(n);
-       old_n = xchg(&rt->dst._neighbour, n);
-       if (old_n)
-               neigh_release(old_n);
-       if (!n || !(n->nud_state & NUD_VALID)) {
-               if (n)
-                       neigh_event_send(n, NULL);
-               rt->rt_gateway = orig_gw;
-               return -EAGAIN;
-       } else {
-               rt->rt_flags |= RTCF_REDIRECTED;
-               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
-       }
-       return 0;
-}
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
 {
-       struct rtable *rt = (struct rtable *) dst;
-
-       if (rt_is_expired(rt))
-               return NULL;
        if (rt->rt_peer_genid != rt_peer_genid()) {
                struct inet_peer *peer;
 
@@ -1691,17 +1791,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
                peer = rt->peer;
                if (peer) {
-                       check_peer_pmtu(dst, peer);
+                       check_peer_pmtu(&rt->dst, peer);
 
+                       if (peer->redirect_genid != redirect_genid)
+                               peer->redirect_learned.a4 = 0;
                        if (peer->redirect_learned.a4 &&
-                           peer->redirect_learned.a4 != rt->rt_gateway) {
-                               if (check_peer_redir(dst, peer))
-                                       return NULL;
-                       }
+                           peer->redirect_learned.a4 != rt->rt_gateway)
+                               check_peer_redir(&rt->dst, peer);
                }
 
                rt->rt_peer_genid = rt_peer_genid();
        }
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+       struct rtable *rt = (struct rtable *) dst;
+
+       if (rt_is_expired(rt))
+               return NULL;
+       ipv4_validate_peer(rt);
        return dst;
 }
 
@@ -1806,12 +1915,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
        return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = dst->dev->mtu;
+       const struct rtable *rt = (const struct rtable *) dst;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu && rt_is_output_route(rt))
+               return mtu;
+
+       mtu = dst->dev->mtu;
 
        if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-               const struct rtable *rt = (const struct rtable *) dst;
 
                if (rt->rt_gateway != rt->rt_dst && mtu > 576)
                        mtu = 576;
@@ -1844,6 +1958,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                dst_init_metrics(&rt->dst, peer->metrics, false);
 
                check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_genid != redirect_genid)
+                       peer->redirect_learned.a4 = 0;
                if (peer->redirect_learned.a4 &&
                    peer->redirect_learned.a4 != rt->rt_gateway) {
                        rt->rt_gateway = peer->redirect_learned.a4;
@@ -2349,6 +2465,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                    rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        if (noref) {
                                dst_use_noref(&rth->dst, jiffies);
                                skb_dst_set_noref(skb, &rth->dst);
@@ -2407,11 +2524,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4,
                                       __be32 orig_daddr, __be32 orig_saddr,
-                                      int orig_oif, struct net_device *dev_out,
+                                      int orig_oif, __u8 orig_rtos,
+                                      struct net_device *dev_out,
                                       unsigned int flags)
 {
        struct fib_info *fi = res->fi;
-       u32 tos = RT_FL_TOS(fl4);
        struct in_device *in_dev;
        u16 type = res->type;
        struct rtable *rth;
@@ -2462,7 +2579,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        rth->rt_genid = rt_genid(dev_net(dev_out));
        rth->rt_flags   = flags;
        rth->rt_type    = type;
-       rth->rt_key_tos = tos;
+       rth->rt_key_tos = orig_rtos;
        rth->rt_dst     = fl4->daddr;
        rth->rt_src     = fl4->saddr;
        rth->rt_route_iif = 0;
@@ -2512,7 +2629,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
        struct net_device *dev_out = NULL;
-       u32 tos = RT_FL_TOS(fl4);
+       __u8 tos = RT_FL_TOS(fl4);
        unsigned int flags = 0;
        struct fib_result res;
        struct rtable *rth;
@@ -2688,7 +2805,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 
 make_route:
        rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-                              dev_out, flags);
+                              tos, dev_out, flags);
        if (!IS_ERR(rth)) {
                unsigned int hash;
 
@@ -2724,6 +2841,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
@@ -2747,9 +2865,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
        return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2767,7 +2887,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
        .check                  =       ipv4_blackhole_dst_check,
-       .default_mtu            =       ipv4_blackhole_default_mtu,
+       .mtu                    =       ipv4_blackhole_mtu,
        .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
@@ -2845,7 +2965,7 @@ static int rt_fill_info(struct net *net,
        struct rtable *rt = skb_rtable(skb);
        struct rtmsg *r;
        struct nlmsghdr *nlh;
-       long expires = 0;
+       unsigned long expires = 0;
        const struct inet_peer *peer = rt->peer;
        u32 id = 0, ts = 0, tsage = 0, error;
 
@@ -2902,8 +3022,12 @@ static int rt_fill_info(struct net *net,
                        tsage = get_seconds() - peer->tcp_ts_stamp;
                }
                expires = ACCESS_ONCE(peer->pmtu_expires);
-               if (expires)
-                       expires -= jiffies;
+               if (expires) {
+                       if (time_before(jiffies, expires))
+                               expires -= jiffies;
+                       else
+                               expires = 0;
+               }
        }
 
        if (rt_is_input_route(rt)) {
@@ -3144,6 +3268,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3354,6 +3485,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
index 90f6544c13e287639ae2d7686a55dc8f6c9cd168..51fdbb490437a565e7d9d89397295fddb28da34e 100644 (file)
@@ -245,7 +245,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
        if (!sysctl_tcp_timestamps)
                return false;
 
-       tcp_opt->sack_ok = (options >> 4) & 0x1;
+       tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
        *ecn_ok = (options >> 5) & 1;
        if (*ecn_ok && !sysctl_tcp_ecn)
                return false;
index 69fd7201129a742ee748932d52929d7fe548289c..4aa7e9dc0cbb961ab75fedea9944304a609dd2fc 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
+#include <linux/swap.h>
 #include <net/snmp.h>
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -23,6 +24,7 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
+#include <net/tcp_memcontrol.h>
 
 static int zero;
 static int tcp_retr1_max = 255;
@@ -73,7 +75,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
 }
 
 
-void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
        gid_t *data = table->data;
        unsigned seq;
@@ -86,7 +88,7 @@ void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, int range[2])
+static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
 {
        gid_t *data = table->data;
        write_seqlock(&sysctl_local_ports.lock);
@@ -174,6 +176,49 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
        return ret;
 }
 
+static int ipv4_tcp_mem(ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp,
+                          loff_t *ppos)
+{
+       int ret;
+       unsigned long vec[3];
+       struct net *net = current->nsproxy->net_ns;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       struct mem_cgroup *memcg;
+#endif
+
+       ctl_table tmp = {
+               .data = &vec,
+               .maxlen = sizeof(vec),
+               .mode = ctl->mode,
+       };
+
+       if (!write) {
+               ctl->data = &net->ipv4.sysctl_tcp_mem;
+               return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
+       }
+
+       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       rcu_read_lock();
+       memcg = mem_cgroup_from_task(current);
+
+       tcp_prot_mem(memcg, vec[0], 0);
+       tcp_prot_mem(memcg, vec[1], 1);
+       tcp_prot_mem(memcg, vec[2], 2);
+       rcu_read_unlock();
+#endif
+
+       net->ipv4.sysctl_tcp_mem[0] = vec[0];
+       net->ipv4.sysctl_tcp_mem[1] = vec[1];
+       net->ipv4.sysctl_tcp_mem[2] = vec[2];
+
+       return 0;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -432,13 +477,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_mem",
-               .data           = &sysctl_tcp_mem,
-               .maxlen         = sizeof(sysctl_tcp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
        {
                .procname       = "tcp_wmem",
                .data           = &sysctl_tcp_wmem,
@@ -721,6 +759,12 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
        },
+       {
+               .procname       = "tcp_mem",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
+               .mode           = 0644,
+               .proc_handler   = ipv4_tcp_mem,
+       },
        { }
 };
 
@@ -734,6 +778,7 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
        struct ctl_table *table;
+       unsigned long limit;
 
        table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
@@ -769,6 +814,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+       limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
+       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+       net->ipv4.sysctl_tcp_mem[1] = limit;
+       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+
        net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
                        net_ipv4_ctl_path, table);
        if (net->ipv4.ipv4_hdr == NULL)
index 34f5db1e1c8b85110192c881e17c7fd11c01d633..9bcdec3ad772171a6aa71584b86bc5c1c998db83 100644 (file)
@@ -282,11 +282,9 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
-EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -888,18 +886,18 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-#define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
-#define TCP_OFF(sk)    (sk->sk_sndmsg_off)
-
-static inline int select_size(const struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, bool sg)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sg) {
-               if (sk_can_gso(sk))
-                       tmp = 0;
-               else {
+               if (sk_can_gso(sk)) {
+                       /* Small frames wont use a full page:
+                        * Payload will immediately follow tcp header.
+                        */
+                       tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+               } else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 
                        if (tmp >= pgbreak &&
@@ -917,9 +915,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int iovlen, flags;
+       int iovlen, flags, err, copied;
        int mss_now, size_goal;
-       int sg, err, copied;
+       bool sg;
        long timeo;
 
        lock_sock(sk);
@@ -946,7 +944,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
-       sg = sk->sk_route_caps & NETIF_F_SG;
+       sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
        while (--iovlen >= 0) {
                size_t seglen = iov->iov_len;
@@ -1005,8 +1003,13 @@ new_segment:
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = TCP_PAGE(sk);
-                               int off = TCP_OFF(sk);
+                               struct page *page = sk->sk_sndmsg_page;
+                               int off;
+
+                               if (page && page_count(page) == 1)
+                                       sk->sk_sndmsg_off = 0;
+
+                               off = sk->sk_sndmsg_off;
 
                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
@@ -1023,7 +1026,7 @@ new_segment:
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
-                                               TCP_PAGE(sk) = page = NULL;
+                                               sk->sk_sndmsg_page = page = NULL;
                                                off = 0;
                                        }
                                } else
@@ -1049,9 +1052,9 @@ new_segment:
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
-                                       if (!TCP_PAGE(sk)) {
-                                               TCP_PAGE(sk) = page;
-                                               TCP_OFF(sk) = 0;
+                                       if (!sk->sk_sndmsg_page) {
+                                               sk->sk_sndmsg_page = page;
+                                               sk->sk_sndmsg_off = 0;
                                        }
                                        goto do_error;
                                }
@@ -1061,15 +1064,15 @@ new_segment:
                                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (TCP_PAGE(sk)) {
+                                       if (sk->sk_sndmsg_page) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
-                                               TCP_PAGE(sk) = page;
+                                               sk->sk_sndmsg_page = page;
                                        }
                                }
 
-                               TCP_OFF(sk) = off + copy;
+                               sk->sk_sndmsg_off = off + copy;
                        }
 
                        if (!copied)
@@ -2653,7 +2656,8 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
@@ -3272,14 +3276,9 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       limit = nr_free_buffer_pages() / 8;
-       limit = max(limit, 128UL);
-       sysctl_tcp_mem[0] = limit / 4 * 3;
-       sysctl_tcp_mem[1] = limit;
-       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
-
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
-       limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+       limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
+               << (PAGE_SHIFT - 7);
        max_share = min(4UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
index 850c737e08e2a4a9185c64c8776711d83b8c7b08..fc6d475f488f87a9bb084e9b9930e8e28cf7757f 100644 (file)
@@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
            left * tp->mss_cache < sk->sk_gso_max_size)
                return 1;
-       return left <= tcp_max_burst(tp);
+       return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
index 939edb3b8e4dcf6f4d4661cd44c0664a1d7b2313..8cd357a8be7990b5212253d95fa72a1f740a3edf 100644 (file)
@@ -34,11 +34,23 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                tcp_get_info(sk, info);
 }
 
+static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+}
+
+static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler tcp_diag_handler = {
-       .idiag_hashinfo  = &tcp_hashinfo,
+       .dump            = tcp_diag_dump,
+       .dump_one        = tcp_diag_dump_one,
        .idiag_get_info  = tcp_diag_get_info,
-       .idiag_type      = TCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_TCP,
 };
 
 static int __init tcp_diag_init(void)
@@ -54,4 +66,4 @@ static void __exit tcp_diag_exit(void)
 module_init(tcp_diag_init);
 module_exit(tcp_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
index 52b5c2d0ecd0aee7c99d1f31e8bf3a1115a293b9..2877c3e0958777dff87612bb7c057df451f5b57a 100644 (file)
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_memory_pressure) {
+           !sk_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !tcp_memory_pressure &&
-           atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+           !sk_under_memory_pressure(sk) &&
+           sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
        }
@@ -865,13 +865,13 @@ static void tcp_disable_fack(struct tcp_sock *tp)
        /* RFC3517 uses different metric in lost marker => reset on change */
        if (tcp_is_fack(tp))
                tp->lost_skb_hint = NULL;
-       tp->rx_opt.sack_ok &= ~2;
+       tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
 }
 
 /* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
-       tp->rx_opt.sack_ok |= 4;
+       tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
 /* Initialize metrics on socket. */
@@ -2663,7 +2663,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                       tp->snd_ssthresh, tp->prior_ssthresh,
                       tp->packets_out);
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
                printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
@@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int state = TCP_CA_Open;
 
-       if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
+       if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
                state = TCP_CA_Disorder;
 
        if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2881,7 +2881,8 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
-               tcp_moderate_cwnd(tp);
+               if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+                       tcp_moderate_cwnd(tp);
        } else {
                tcp_cwnd_down(sk, flag);
        }
@@ -3009,11 +3010,11 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int newly_acked_sacked, int flag)
+                                 int newly_acked_sacked, bool is_dupack,
+                                 int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
        int fast_rexmit = 0, mib_idx;
@@ -3066,17 +3067,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        }
                        break;
 
-               case TCP_CA_Disorder:
-                       tcp_try_undo_dsack(sk);
-                       if (!tp->undo_marker ||
-                           /* For SACK case do not Open to allow to undo
-                            * catching for all duplicate ACKs. */
-                           tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
-                               tp->undo_marker = 0;
-                               tcp_set_ca_state(sk, TCP_CA_Open);
-                       }
-                       break;
-
                case TCP_CA_Recovery:
                        if (tcp_is_reno(tp))
                                tcp_reset_reno_sack(tp);
@@ -3117,7 +3107,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                }
 
-               if (icsk->icsk_ca_state == TCP_CA_Disorder)
+               if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk)) {
@@ -3681,10 +3671,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 prior_snd_una = tp->snd_una;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
+       bool is_dupack = false;
        u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets;
        int prior_sacked = tp->sacked_out;
+       int pkts_acked = 0;
        int newly_acked_sacked = 0;
        int frto_cwnd = 0;
 
@@ -3757,6 +3749,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* See if we can take anything off of the retransmit queue. */
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+       pkts_acked = prior_packets - tp->packets_out;
        newly_acked_sacked = (prior_packets - prior_sacked) -
                             (tp->packets_out - tp->sacked_out);
 
@@ -3771,8 +3764,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
-               tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                     newly_acked_sacked, flag);
+               is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3784,6 +3778,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        return 1;
 
 no_queue:
+       /* If data was DSACKed, see if we can undo a cwnd reduction. */
+       if (flag & FLAG_DSACKING_ACK)
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3797,10 +3795,14 @@ invalid_ack:
        return -1;
 
 old_ack:
+       /* If data was SACKed, tag it and see if we should send more data.
+        * If data was DSACKed, see if we can undo a cwnd reduction.
+        */
        if (TCP_SKB_CB(skb)->sacked) {
-               tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               if (icsk->icsk_ca_state == TCP_CA_Open)
-                       tcp_try_keep_open(sk);
+               flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+               newly_acked_sacked = tp->sacked_out - prior_sacked;
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        }
 
        SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -3876,7 +3878,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
                        case TCPOPT_SACK_PERM:
                                if (opsize == TCPOLEN_SACK_PERM && th->syn &&
                                    !estab && sysctl_tcp_sack) {
-                                       opt_rx->sack_ok = 1;
+                                       opt_rx->sack_ok = TCP_SACK_SEEN;
                                        tcp_sack_reset(opt_rx);
                                }
                                break;
@@ -4864,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (tcp_memory_pressure)
+       else if (sk_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4930,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
                return 0;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                return 0;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
-       if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+       if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
                return 0;
 
        /* If we filled the congestion window, do not expand.  */
@@ -5809,6 +5811,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
 
                if (th->syn) {
+                       if (th->fin)
+                               goto discard;
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
index a7443159c400450e73ff9b83230fe40a48d0da39..1eb4ad57670eb0f47c4a3ecde927819cb83ef12f 100644 (file)
@@ -73,6 +73,7 @@
 #include <net/xfrm.h>
 #include <net/netdma.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1510,6 +1511,8 @@ exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 put_and_exit:
+       tcp_clear_xmit_timers(newsk);
+       tcp_cleanup_congestion_control(newsk);
        bh_unlock_sock(newsk);
        sock_put(newsk);
        goto exit;
@@ -1915,7 +1918,8 @@ static int tcp_v4_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -1971,7 +1975,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
                tp->cookie_values = NULL;
        }
 
-       percpu_counter_dec(&tcp_sockets_allocated);
+       sk_sockets_allocated_dec(sk);
+       sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2618,7 +2623,6 @@ struct proto tcp_prot = {
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2632,10 +2636,14 @@ struct proto tcp_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .init_cgroup            = tcp_init_cgroup,
+       .destroy_cgroup         = tcp_destroy_cgroup,
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 EXPORT_SYMBOL(tcp_prot);
 
-
 static int __net_init tcp_sk_init(struct net *net)
 {
        return inet_ctl_sock_create(&net->ipv4.tcp_sock,
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
new file mode 100644 (file)
index 0000000..7fed04f
--- /dev/null
@@ -0,0 +1,272 @@
+#include <net/tcp.h>
+#include <net/tcp_memcontrol.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <linux/nsproxy.h>
+#include <linux/memcontrol.h>
+#include <linux/module.h>
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer);
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
+
+static struct cftype tcp_files[] = {
+       {
+               .name = "kmem.tcp.limit_in_bytes",
+               .write_string = tcp_cgroup_write,
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_LIMIT,
+       },
+       {
+               .name = "kmem.tcp.usage_in_bytes",
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_USAGE,
+       },
+       {
+               .name = "kmem.tcp.failcnt",
+               .private = RES_FAILCNT,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+       {
+               .name = "kmem.tcp.max_usage_in_bytes",
+               .private = RES_MAX_USAGE,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+};
+
+static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
+{
+       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
+}
+
+static void memcg_tcp_enter_memory_pressure(struct sock *sk)
+{
+       if (sk->sk_cgrp->memory_pressure)
+               *sk->sk_cgrp->memory_pressure = 1;
+}
+EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
+
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       /*
+        * The root cgroup does not use res_counters, but rather,
+        * rely on the data already collected by the network
+        * subsystem
+        */
+       struct res_counter *res_parent = NULL;
+       struct cg_proto *cg_proto, *parent_cg;
+       struct tcp_memcontrol *tcp;
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+       struct net *net = current->nsproxy->net_ns;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               goto create_files;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
+       tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
+       tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
+       tcp->tcp_memory_pressure = 0;
+
+       parent_cg = tcp_prot.proto_cgroup(parent);
+       if (parent_cg)
+               res_parent = parent_cg->memory_allocated;
+
+       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
+       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+
+       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
+       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
+       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
+       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
+       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
+       cg_proto->memcg = memcg;
+
+create_files:
+       return cgroup_add_files(cgrp, ss, tcp_files,
+                               ARRAY_SIZE(tcp_files));
+}
+EXPORT_SYMBOL(tcp_init_cgroup);
+
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct cg_proto *cg_proto;
+       struct tcp_memcontrol *tcp;
+       u64 val;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+
+       val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+
+       if (val != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+}
+EXPORT_SYMBOL(tcp_destroy_cgroup);
+
+static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
+{
+       struct net *net = current->nsproxy->net_ns;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+       u64 old_lim;
+       int i;
+       int ret;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return -EINVAL;
+
+       if (val > RESOURCE_MAX)
+               val = RESOURCE_MAX;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 3; i++)
+               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                            net->ipv4.sysctl_tcp_mem[i]);
+
+       if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+       else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+               jump_label_inc(&memcg_socket_limit_enabled);
+
+       return 0;
+}
+
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       unsigned long long val;
+       int ret = 0;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               /* see memcontrol.c */
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (ret)
+                       break;
+               ret = tcp_update_limit(memcg, val);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return default_val;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+}
+
+static u64 tcp_read_usage(struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+}
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       u64 val;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+               break;
+       case RES_USAGE:
+               val = tcp_read_usage(memcg);
+               break;
+       case RES_FAILCNT:
+       case RES_MAX_USAGE:
+               val = tcp_read_stat(memcg, cft->private, 0);
+               break;
+       default:
+               BUG();
+       }
+       return val;
+}
+
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+{
+       struct mem_cgroup *memcg;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       memcg = mem_cgroup_from_cont(cont);
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return 0;
+       tcp = tcp_from_cgproto(cg_proto);
+
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               break;
+       }
+
+       return 0;
+}
+
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
+       if (!cg_proto)
+               return 0;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+}
+
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[idx] = val;
+}
index 66363b689ad652586e35ecc49362fee864cf7685..550e755747e0651610d481f4e91e47ca07fcf82c 100644 (file)
@@ -336,15 +336,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
 
                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
-                       ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw6->tw_v6_daddr = np->daddr;
+                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_ipv6only = np->ipv6only;
                }
@@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
  */
 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
-       struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+       struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
        if (newsk != NULL) {
                const struct inet_request_sock *ireq = inet_rsk(req);
@@ -495,7 +495,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;
 
-               newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+               if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
+                   !try_module_get(newicsk->icsk_ca_ops->owner))
+                       newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
index 980b98f6288c3f82f85da0ac61f74ecfa0cbc12b..8c8de2780c7a7add9e91805300824e7182d40f28 100644 (file)
@@ -1093,6 +1093,13 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
        int i, k, eat;
 
+       eat = min_t(int, len, skb_headlen(skb));
+       if (eat) {
+               __skb_pull(skb, eat);
+               len -= eat;
+               if (!len)
+                       return;
+       }
        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1124,11 +1131,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;
 
-       /* If len == headlen, we avoid __skb_pull to preserve alignment. */
-       if (unlikely(len < skb_headlen(skb)))
-               __skb_pull(skb, len);
-       else
-               __pskb_trim_head(skb, len - skb_headlen(skb));
+       __pskb_trim_head(skb, len);
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1382,7 +1385,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
 /* Return 0, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
  * 2. Or it contains FIN. (already checked by caller)
- * 3. Or TCP_NODELAY was set.
+ * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
  */
@@ -1581,7 +1584,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
                 * frame, so if we have space for more than 3 frames
                 * then send now.
                 */
-               if (limit > tcp_max_burst(tp) * tp->mss_cache)
+               if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
                        goto send_now;
        }
 
@@ -1919,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
-               if (tcp_memory_pressure)
+               if (sk_under_memory_pressure(sk))
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
@@ -2147,7 +2150,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-       err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+       /* make sure skb->data is aligned on arches that require it */
+       if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+               struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+                                                  GFP_ATOMIC);
+               err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                            -ENOBUFS;
+       } else {
+               err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+       }
 
        if (err == 0) {
                /* Update global TCP statistics. */
index 2e0f0af76c19b36032ab077afa2e2bf28467748b..a516d1e399dfcb1a22ca39e452f3dbe725455ade 100644 (file)
@@ -171,13 +171,13 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
-       bool do_reset, syn_set = 0;
+       bool do_reset, syn_set = false;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
-               syn_set = 1;
+               syn_set = true;
        } else {
                if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
                        /* Black hole detection */
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
        }
 
 out:
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
@@ -340,7 +340,7 @@ void tcp_retransmit_timer(struct sock *sk)
                               &inet->inet_daddr, ntohs(inet->inet_dport),
                               inet->inet_num, tp->snd_una, tp->snd_nxt);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
index ac3b3ee4b07c85a32a6f4c35484741c0c3cc7976..01775983b997da37ab2c85faa05da34b34e0ffd9 100644 (file)
@@ -105,7 +105,7 @@ drop:
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int tunnel64_rcv(struct sk_buff *skb)
 {
        struct xfrm_tunnel *handler;
@@ -134,7 +134,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
                        break;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void tunnel64_err(struct sk_buff *skb, u32 info)
 {
        struct xfrm_tunnel *handler;
@@ -152,7 +152,7 @@ static const struct net_protocol tunnel4_protocol = {
        .netns_ok       =       1,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static const struct net_protocol tunnel64_protocol = {
        .handler        =       tunnel64_rcv,
        .err_handler    =       tunnel64_err,
@@ -167,7 +167,7 @@ static int __init tunnel4_init(void)
                printk(KERN_ERR "tunnel4 init: can't add protocol\n");
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
                printk(KERN_ERR "tunnel64 init: can't add protocol\n");
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
@@ -179,7 +179,7 @@ static int __init tunnel4_init(void)
 
 static void __exit tunnel4_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
                printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
 #endif
index ab0966df1e2a8aec9e4ecb40e77332cbebbde466..5d075b5f70fcd61dabe0c38ac6864771b4022093 100644 (file)
@@ -445,7 +445,7 @@ exact_match:
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
-static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
                int dif, struct udp_table *udptable)
 {
@@ -512,6 +512,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
 
 static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
@@ -1164,7 +1165,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1187,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        /*
@@ -1197,14 +1199,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov, len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
@@ -1233,7 +1235,7 @@ try_again:
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
@@ -1357,7 +1359,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        if (inet_sk(sk)->inet_daddr)
                sock_rps_save_rxhash(sk, skb);
 
-       rc = ip_queue_rcv_skb(sk, skb);
+       rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
                int is_udplite = IS_UDPLITE(sk);
 
@@ -1473,6 +1475,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        rc = 0;
 
+       ipv4_pktinfo_prepare(skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
@@ -2246,7 +2249,8 @@ int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
new file mode 100644 (file)
index 0000000..69f8a7c
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * udp_diag.c  Module for monitoring UDP transport protocol sockets.
+ *
+ * Authors:    Pavel Emelyanov, <xemul@parallels.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *req,
+               struct nlattr *bc)
+{
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
+
+       return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+                       cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
+{
+       int err = -EINVAL;
+       struct sock *sk;
+       struct sk_buff *rep;
+
+       if (req->sdiag_family == AF_INET)
+               sk = __udp4_lib_lookup(&init_net,
+                               req->id.idiag_src[0], req->id.idiag_sport,
+                               req->id.idiag_dst[0], req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6)
+               sk = __udp6_lib_lookup(&init_net,
+                               (struct in6_addr *)req->id.idiag_src,
+                               req->id.idiag_sport,
+                               (struct in6_addr *)req->id.idiag_dst,
+                               req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#endif
+       else
+               goto out_nosk;
+
+       err = -ENOENT;
+       if (sk == NULL)
+               goto out_nosk;
+
+       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+       if (err)
+               goto out;
+
+       err = -ENOMEM;
+       rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+                                    sizeof(struct inet_diag_meminfo) +
+                                    64)), GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = inet_sk_diag_fill(sk, NULL, rep, req,
+                          NETLINK_CB(in_skb).pid,
+                          nlh->nlmsg_seq, 0, nlh);
+       if (err < 0) {
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(rep);
+               goto out;
+       }
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       if (sk)
+               sock_put(sk);
+out_nosk:
+       return err;
+}
+
+static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       int num, s_num, slot, s_slot;
+
+       s_slot = cb->args[0];
+       num = s_num = cb->args[1];
+
+       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+               struct sock *sk;
+               struct hlist_nulls_node *node;
+               struct udp_hslot *hslot = &table->hash[slot];
+
+               if (hlist_nulls_empty(&hslot->head))
+                       continue;
+
+               spin_lock_bh(&hslot->lock);
+               sk_nulls_for_each(sk, node, &hslot->head) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       if (num < s_num)
+                               goto next;
+                       if (!(r->idiag_states & (1 << sk->sk_state)))
+                               goto next;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next;
+                       if (r->id.idiag_sport != inet->inet_sport &&
+                           r->id.idiag_sport)
+                               goto next;
+                       if (r->id.idiag_dport != inet->inet_dport &&
+                           r->id.idiag_dport)
+                               goto next;
+
+                       if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                               spin_unlock_bh(&hslot->lock);
+                               goto done;
+                       }
+next:
+                       num++;
+               }
+               spin_unlock_bh(&hslot->lock);
+       }
+done:
+       cb->args[0] = slot;
+       cb->args[1] = num;
+}
+
+static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udp_table, skb, cb, r, bc);
+}
+
+static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udp_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udp_diag_handler = {
+       .dump            = udp_diag_dump,
+       .dump_one        = udp_diag_dump_one,
+       .idiag_type      = IPPROTO_UDP,
+};
+
+static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udplite_table, skb, cb, r, bc);
+}
+
+static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udplite_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udplite_diag_handler = {
+       .dump            = udplite_diag_dump,
+       .dump_one        = udplite_diag_dump_one,
+       .idiag_type      = IPPROTO_UDPLITE,
+};
+
+static int __init udp_diag_init(void)
+{
+       int err;
+
+       err = inet_diag_register(&udp_diag_handler);
+       if (err)
+               goto out;
+       err = inet_diag_register(&udplite_diag_handler);
+       if (err)
+               goto out_lite;
+out:
+       return err;
+out_lite:
+       inet_diag_unregister(&udp_diag_handler);
+       goto out;
+}
+
+static void __exit udp_diag_exit(void)
+{
+       inet_diag_unregister(&udplite_diag_handler);
+       inet_diag_unregister(&udp_diag_handler);
+}
+
+module_init(udp_diag_init);
+module_exit(udp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
index 82806455e8598862d1e925ff24a05403e8cd6519..9247d9d70e9db5439d8e59fb7748f47279da91f5 100644 (file)
@@ -64,7 +64,7 @@ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
        .priority       =       2,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
        .handler        =       xfrm_tunnel_rcv,
        .err_handler    =       xfrm_tunnel_err,
@@ -84,7 +84,7 @@ static int __init ipip_init(void)
                xfrm_unregister_type(&ipip_type, AF_INET);
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
                printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
                xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
@@ -97,7 +97,7 @@ static int __init ipip_init(void)
 
 static void __exit ipip_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
                printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
 #endif
index cf88df82e2c21ca21da8184f0370096b094dffba..0ba0866230c99367db4f3706b15cb2ed0ff91dbf 100644 (file)
@@ -630,13 +630,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 0);
+       rt = addrconf_dst_alloc(idev, addr, false);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto out;
        }
 
-       ipv6_addr_copy(&ifa->addr, addr);
+       ifa->addr = *addr;
 
        spin_lock_init(&ifa->lock);
        spin_lock_init(&ifa->state_lock);
@@ -650,16 +650,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 
        ifa->rt = rt;
 
-       /*
-        * part one of RFC 4429, section 3.3
-        * We should not configure an address as
-        * optimistic if we do not yet know the link
-        * layer address of our nexhop router
-        */
-
-       if (dst_get_neighbour_raw(&rt->dst) == NULL)
-               ifa->flags &= ~IFA_F_OPTIMISTIC;
-
        ifa->idev = idev;
        in6_dev_hold(idev);
        /* For caller */
@@ -807,7 +797,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
                                ip6_del_rt(rt);
                                rt = NULL;
                        } else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
-                               rt->rt6i_expires = expires;
+                               rt->dst.expires = expires;
                                rt->rt6i_flags |= RTF_EXPIRES;
                        }
                }
@@ -1228,7 +1218,7 @@ try_nextdev:
        if (!hiscore->ifa)
                return -EADDRNOTAVAIL;
 
-       ipv6_addr_copy(saddr, &hiscore->ifa->addr);
+       *saddr = hiscore->ifa->addr;
        in6_ifa_put(hiscore->ifa);
        return 0;
 }
@@ -1249,7 +1239,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
                list_for_each_entry(ifp, &idev->addr_list, if_list) {
                        if (ifp->scope == IFA_LINK &&
                            !(ifp->flags & banned_flags)) {
-                               ipv6_addr_copy(addr, &ifp->addr);
+                               *addr = ifp->addr;
                                err = 0;
                                break;
                        }
@@ -1700,7 +1690,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                .fc_protocol = RTPROT_KERNEL,
        };
 
-       ipv6_addr_copy(&cfg.fc_dst, pfx);
+       cfg.fc_dst = *pfx;
 
        /* Prevent useless cloning on PtP SIT.
           This thing is done here expecting that the whole
@@ -1733,7 +1723,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        if (!fn)
                goto out;
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_dev->ifindex != dev->ifindex)
+               if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & flags) != flags)
                        continue;
@@ -1805,14 +1795,15 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
                return ERR_PTR(-EACCES);
 
        /* Add default multicast route */
-       addrconf_add_mroute(dev);
+       if (!(dev->flags & IFF_LOOPBACK))
+               addrconf_add_mroute(dev);
 
        /* Add link local route */
        addrconf_add_lroute(dev);
        return idev;
 }
 
-void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
+void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
 {
        struct prefix_info *pinfo;
        __u32 valid_lft;
@@ -1890,11 +1881,11 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
                                rt = NULL;
                        } else if (addrconf_finite_timeout(rt_expires)) {
                                /* not infinity */
-                               rt->rt6i_expires = jiffies + rt_expires;
+                               rt->dst.expires = jiffies + rt_expires;
                                rt->rt6i_flags |= RTF_EXPIRES;
                        } else {
                                rt->rt6i_flags &= ~RTF_EXPIRES;
-                               rt->rt6i_expires = 0;
+                               rt->dst.expires = 0;
                        }
                } else if (valid_lft) {
                        clock_t expires = 0;
@@ -1943,7 +1934,7 @@ ok:
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
                        if (in6_dev->cnf.optimistic_dad &&
-                           !net->ipv6.devconf_all->forwarding)
+                           !net->ipv6.devconf_all->forwarding && sllao)
                                addr_flags = IFA_F_OPTIMISTIC;
 #endif
 
@@ -3077,20 +3068,39 @@ static void addrconf_dad_run(struct inet6_dev *idev)
 struct if6_iter_state {
        struct seq_net_private p;
        int bucket;
+       int offset;
 };
 
-static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
+static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 {
        struct inet6_ifaddr *ifa = NULL;
        struct if6_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
+       int p = 0;
+
+       /* initial bucket if pos is 0 */
+       if (pos == 0) {
+               state->bucket = 0;
+               state->offset = 0;
+       }
 
-       for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
+       for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
                struct hlist_node *n;
                hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
-                                        addr_lst)
+                                        addr_lst) {
+                       /* sync with offset */
+                       if (p < state->offset) {
+                               p++;
+                               continue;
+                       }
+                       state->offset++;
                        if (net_eq(dev_net(ifa->idev->dev), net))
                                return ifa;
+               }
+
+               /* prepare for next bucket */
+               state->offset = 0;
+               p = 0;
        }
        return NULL;
 }
@@ -3102,13 +3112,17 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct hlist_node *n = &ifa->addr_lst;
 
-       hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+       hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+               state->offset++;
                if (net_eq(dev_net(ifa->idev->dev), net))
                        return ifa;
+       }
 
        while (++state->bucket < IN6_ADDR_HSIZE) {
+               state->offset = 0;
                hlist_for_each_entry_rcu_bh(ifa, n,
                                     &inet6_addr_lst[state->bucket], addr_lst) {
+                       state->offset++;
                        if (net_eq(dev_net(ifa->idev->dev), net))
                                return ifa;
                }
@@ -3117,21 +3131,11 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
        return NULL;
 }
 
-static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
-{
-       struct inet6_ifaddr *ifa = if6_get_first(seq);
-
-       if (ifa)
-               while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
-                       --pos;
-       return pos ? NULL : ifa;
-}
-
 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(rcu_bh)
 {
        rcu_read_lock_bh();
-       return if6_get_idx(seq, *pos);
+       return if6_get_first(seq, *pos);
 }
 
 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
index d27c797f9f05e76b82317d1a73818e0a7557bdf3..273f48d1df2e974b5722e9133c3dd04ffd39473e 100644 (file)
@@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                         */
                        v4addr = LOOPBACK4_IPV6;
                        if (!(addr_type & IPV6_ADDR_MULTICAST)) {
-                               if (!inet->transparent &&
+                               if (!(inet->freebind || inet->transparent) &&
                                    !ipv6_chk_addr(net, &addr->sin6_addr,
                                                   dev, 0)) {
                                        err = -EADDRNOTAVAIL;
@@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        inet->inet_rcv_saddr = v4addr;
        inet->inet_saddr = v4addr;
 
-       ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+       np->rcv_saddr = addr->sin6_addr;
 
        if (!(addr_type & IPV6_ADDR_MULTICAST))
-               ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+               np->saddr = addr->sin6_addr;
 
        /* Make sure we are allowed to bind here. */
        if (sk->sk_prot->get_port(sk, snum)) {
@@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
                    peer == 1)
                        return -ENOTCONN;
                sin->sin6_port = inet->inet_dport;
-               ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+               sin->sin6_addr = np->daddr;
                if (np->sndflow)
                        sin->sin6_flowinfo = np->flow_label;
        } else {
                if (ipv6_addr_any(&np->rcv_saddr))
-                       ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+                       sin->sin6_addr = np->saddr;
                else
-                       ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
+                       sin->sin6_addr = np->rcv_saddr;
 
                sin->sin6_port = inet->inet_sport;
        }
@@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = sk->sk_protocol;
-               ipv6_addr_copy(&fl6.daddr, &np->daddr);
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.daddr = np->daddr;
+               fl6.saddr = np->saddr;
                fl6.flowlabel = np->flow_label;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
@@ -769,7 +769,8 @@ out:
        return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
@@ -985,9 +986,9 @@ static int __net_init ipv6_init_mibs(struct net *net)
                          sizeof(struct icmpv6_mib),
                          __alignof__(struct icmpv6_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
-                         sizeof(struct icmpv6msg_mib),
-                         __alignof__(struct icmpv6msg_mib)) < 0)
+       net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
+                                               GFP_KERNEL);
+       if (!net->mib.icmpv6msg_statistics)
                goto err_icmpmsg_mib;
        return 0;
 
@@ -1008,7 +1009,7 @@ static void ipv6_cleanup_mibs(struct net *net)
        snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
        snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
+       kfree(net->mib.icmpv6msg_statistics);
 }
 
 static int __net_init inet6_net_init(struct net *net)
@@ -1115,6 +1116,8 @@ static int __init inet6_init(void)
        if (err)
                goto static_sysctl_fail;
 #endif
+       tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      ipngwg API draft makes clear that the correct semantics
         *      for TCP and UDP is to consider one TCP and UDP instance
index 2195ae651923e0e3242c3e61738fed91d19711ff..2ae79dbeec2feb356f1d91f10a9853cc5f1b5706 100644 (file)
@@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
                                                printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
                                        goto bad;
                                }
-                               ipv6_addr_copy(&final_addr, &hao->addr);
-                               ipv6_addr_copy(&hao->addr, &iph->saddr);
-                               ipv6_addr_copy(&iph->saddr, &final_addr);
+                               final_addr = hao->addr;
+                               hao->addr = iph->saddr;
+                               iph->saddr = final_addr;
                        }
                        break;
                }
@@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
        segments = rthdr->hdrlen >> 1;
 
        addrs = ((struct rt0_hdr *)rthdr)->addr;
-       ipv6_addr_copy(&final_addr, addrs + segments - 1);
+       final_addr = addrs[segments - 1];
 
        addrs += segments - segments_left;
        memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
 
-       ipv6_addr_copy(addrs, &iph->daddr);
-       ipv6_addr_copy(&iph->daddr, &final_addr);
+       addrs[0] = iph->daddr;
+       iph->daddr = final_addr;
 }
 
 static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
 #endif
        }
 
-       err = ah->nexthdr;
-
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
 }
@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
        if (err)
                goto out;
 
+       err = ah->nexthdr;
+
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
        __skb_pull(skb, ah_hlen + hdr_len);
        skb_set_transport_header(skb, -hdr_len);
-
-       err = ah->nexthdr;
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -583,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
                if (err == -EINPROGRESS)
                        goto out;
 
-               if (err == -EBUSY)
-                       err = NET_XMIT_DROP;
                goto out_free;
        }
 
index 674255f5e6b7477e6fc4a5f3acb5d336883389f3..59402b4637f90d57a8be2664c921f2def94ead3b 100644 (file)
@@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        if (pac == NULL)
                return -ENOMEM;
        pac->acl_next = NULL;
-       ipv6_addr_copy(&pac->acl_addr, addr);
+       pac->acl_addr = *addr;
 
        rcu_read_lock();
        if (ifindex == 0) {
@@ -83,7 +83,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
                rt = rt6_lookup(net, addr, NULL, 0, 0);
                if (rt) {
-                       dev = rt->rt6i_dev;
+                       dev = rt->dst.dev;
                        dst_release(&rt->dst);
                } else if (ishost) {
                        err = -EADDRNOTAVAIL;
@@ -289,14 +289,14 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 1);
+       rt = addrconf_dst_alloc(idev, addr, true);
        if (IS_ERR(rt)) {
                kfree(aca);
                err = PTR_ERR(rt);
                goto out;
        }
 
-       ipv6_addr_copy(&aca->aca_addr, addr);
+       aca->aca_addr = *addr;
        aca->aca_idev = idev;
        aca->aca_rt = rt;
        aca->aca_users = 1;
index e2480691c220a16f93f602ce19266ffb2e7f017f..ae08aee1773c678187f8be84414f95ac472aa426 100644 (file)
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                }
        }
 
@@ -143,7 +143,7 @@ ipv4_connected:
                }
        }
 
-       ipv6_addr_copy(&np->daddr, daddr);
+       np->daddr = *daddr;
        np->flow_label = fl6.flowlabel;
 
        inet->inet_dport = usin->sin6_port;
@@ -154,8 +154,8 @@ ipv4_connected:
         */
 
        fl6.flowi6_proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet->inet_dport;
@@ -179,10 +179,10 @@ ipv4_connected:
        /* source address lookup done in ip6_dst_lookup */
 
        if (ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&np->saddr, &fl6.saddr);
+               np->saddr = fl6.saddr;
 
        if (ipv6_addr_any(&np->rcv_saddr)) {
-               ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
+               np->rcv_saddr = fl6.saddr;
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
                if (sk->sk_prot->rehash)
                        sk->sk_prot->rehash(sk);
@@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+       iph->daddr = fl6->daddr;
 
        serr = SKB_EXT_ERR(skb);
        serr->ee.ee_errno = err;
@@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+       iph->daddr = fl6->daddr;
 
        mtu_info = IP6CBMTU(skb);
 
@@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
        mtu_info->ip6m_addr.sin6_port = 0;
        mtu_info->ip6m_addr.sin6_flowinfo = 0;
        mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
-       ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+       mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
 
        __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
        skb_reset_transport_header(skb);
@@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_port = serr->port;
                sin->sin6_scope_id = 0;
                if (skb->protocol == htons(ETH_P_IPV6)) {
-                       ipv6_addr_copy(&sin->sin6_addr,
-                                 (struct in6_addr *)(nh + serr->addr_offset));
+                       sin->sin6_addr =
+                               *(struct in6_addr *)(nh + serr->addr_offset);
                        if (np->sndflow)
                                sin->sin6_flowinfo =
                                        (*(__be32 *)(nh + serr->addr_offset - 24) &
@@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_flowinfo = 0;
                sin->sin6_scope_id = 0;
                if (skb->protocol == htons(ETH_P_IPV6)) {
-                       ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+                       sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
                                datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_flowinfo = 0;
                sin->sin6_port = 0;
                sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
-               ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+               sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
        }
 
        put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
@@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+               src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
                put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
        }
 
@@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+               src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
                put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
        }
        if (np->rxopt.bits.rxohlim) {
@@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                         */
 
                        sin6.sin6_family = AF_INET6;
-                       ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr);
+                       sin6.sin6_addr = ipv6_hdr(skb)->daddr;
                        sin6.sin6_port = ports[1];
                        sin6.sin6_flowinfo = 0;
                        sin6.sin6_scope_id = 0;
@@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 
                        if (addr_type != IPV6_ADDR_ANY) {
                                int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-                               if (!inet_sk(sk)->transparent &&
+                               if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
                                    !ipv6_chk_addr(net, &src_info->ipi6_addr,
                                                   strict ? dev : NULL, 0))
                                        err = -EINVAL;
                                else
-                                       ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
+                                       fl6->saddr = src_info->ipi6_addr;
                        }
 
                        rcu_read_unlock();
index bf22a225f42218bbf05bf173d59eeae04acd8784..3d641b6e9b09256cb43f1cca0e9cabcc1526e30f 100644 (file)
@@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;
 
-       ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
-       ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
-       ipv6_addr_copy(&hao->addr, &tmp_addr);
+       tmp_addr = ipv6h->saddr;
+       ipv6h->saddr = hao->addr;
+       hao->addr = tmp_addr;
 
        if (skb->tstamp.tv64 == 0)
                __net_timestamp(skb);
@@ -461,9 +461,9 @@ looped_back:
                return -1;
        }
 
-       ipv6_addr_copy(&daddr, addr);
-       ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
-       ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
+       daddr = *addr;
+       *addr = ipv6_hdr(skb)->daddr;
+       ipv6_hdr(skb)->daddr = daddr;
 
        skb_dst_drop(skb);
        ip6_route_input(skb);
@@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
                memcpy(phdr->addr, ihdr->addr + 1,
                       (hops - 1) * sizeof(struct in6_addr));
 
-       ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
+       phdr->addr[hops - 1] = **addr_p;
        *addr_p = ihdr->addr;
 
        phdr->rt_hdr.nexthdr = *proto;
@@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
        if (!opt || !opt->srcrt)
                return NULL;
 
-       ipv6_addr_copy(orig, &fl6->daddr);
-       ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
+       *orig = fl6->daddr;
+       fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
        return orig;
 }
 
index 37f548b7f6dc73c4489107a59ee48e4928efa8f1..72957f4a7c6c9f6c24056a5c3eb6046b61c7a3ea 100644 (file)
@@ -57,6 +57,9 @@ int ipv6_ext_hdr(u8 nexthdr)
  *         it returns NULL.
  *       - First fragment header is skipped, not-first ones
  *         are considered as unparsable.
+ *       - Reports the offset field of the final fragment header so it is
+ *         possible to tell whether this is a first fragment, later fragment,
+ *         or not fragmented.
  *       - ESP is unparsable for now and considered like
  *         normal payload protocol.
  *       - Note also special handling of AUTH header. Thanks to IPsec wizards.
@@ -64,10 +67,13 @@ int ipv6_ext_hdr(u8 nexthdr)
  * --ANK (980726)
  */
 
-int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
+int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
+                    __be16 *frag_offp)
 {
        u8 nexthdr = *nexthdrp;
 
+       *frag_offp = 0;
+
        while (ipv6_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr _hdr, *hp;
                int hdrlen;
@@ -87,7 +93,8 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
                        if (fp == NULL)
                                return -1;
 
-                       if (ntohs(*fp) & ~0x7)
+                       *frag_offp = *fp;
+                       if (ntohs(*frag_offp) & ~0x7)
                                break;
                        hdrlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH)
index 295571576f8383190820449f6f9b6dbe264b61d1..b6c573152067c36c90841a35ba191b611d133acc 100644 (file)
@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                        if (!ipv6_prefix_equal(&saddr, &r->src.addr,
                                               r->src.plen))
                                goto again;
-                       ipv6_addr_copy(&flp6->saddr, &saddr);
+                       flp6->saddr = saddr;
                }
                goto out;
        }
index 90868fb42757bf8431eaea968d861d81c4da5c10..01d46bff63c312b7804c8132d6860a1a749eb043 100644 (file)
@@ -135,11 +135,12 @@ static int is_ineligible(struct sk_buff *skb)
        int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
        int len = skb->len - ptr;
        __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+       __be16 frag_off;
 
        if (len < 0)
                return 1;
 
-       ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
+       ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
        if (ptr < 0)
                return 0;
        if (nexthdr == IPPROTO_ICMPV6) {
@@ -290,9 +291,9 @@ static void mip6_addr_swap(struct sk_buff *skb)
                if (likely(off >= 0)) {
                        hao = (struct ipv6_destopt_hao *)
                                        (skb_network_header(skb) + off);
-                       ipv6_addr_copy(&tmp, &iph->saddr);
-                       ipv6_addr_copy(&iph->saddr, &hao->addr);
-                       ipv6_addr_copy(&hao->addr, &tmp);
+                       tmp = iph->saddr;
+                       iph->saddr = hao->addr;
+                       hao->addr = tmp;
                }
        }
 }
@@ -444,9 +445,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
+       fl6.daddr = hdr->saddr;
        if (saddr)
-               ipv6_addr_copy(&fl6.saddr, saddr);
+               fl6.saddr = *saddr;
        fl6.flowi6_oif = iif;
        fl6.fl6_icmp_type = type;
        fl6.fl6_icmp_code = code;
@@ -538,9 +539,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+       fl6.daddr = ipv6_hdr(skb)->saddr;
        if (saddr)
-               ipv6_addr_copy(&fl6.saddr, saddr);
+               fl6.saddr = *saddr;
        fl6.flowi6_oif = skb->dev->ifindex;
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -596,6 +597,7 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        int inner_offset;
        int hash;
        u8 nexthdr;
+       __be16 frag_off;
 
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return;
@@ -603,7 +605,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
                /* now skip over extension headers */
-               inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+               inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                               &nexthdr, &frag_off);
                if (inner_offset<0)
                        return;
        } else {
@@ -786,8 +789,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
                      int oif)
 {
        memset(fl6, 0, sizeof(*fl6));
-       ipv6_addr_copy(&fl6->saddr, saddr);
-       ipv6_addr_copy(&fl6->daddr, daddr);
+       fl6->saddr = *saddr;
+       fl6->daddr = *daddr;
        fl6->flowi6_proto       = IPPROTO_ICMPV6;
        fl6->fl6_icmp_type      = type;
        fl6->fl6_icmp_code      = 0;
index fee46d5a2f125f54451f5115dbddd050138af3a7..02dd203d9eacb2269eaa91336bb41204257e937d 100644 (file)
@@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+       fl6.daddr = treq->rmt_addr;
        final_p = fl6_update_dst(&fl6, np->opt, &final);
-       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.saddr = treq->loc_addr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-                          const u32 rnd, const u16 synq_hsize)
+                          const u32 rnd, const u32 synq_hsize)
 {
        u32 c;
 
@@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
        sin6->sin6_family = AF_INET6;
-       ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+       sin6->sin6_addr = np->daddr;
        sin6->sin6_port = inet_sk(sk)->inet_dport;
        /* We do not store received flowlabel for TCP */
        sin6->sin6_flowinfo = 0;
@@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = np->saddr;
        fl6.flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6.flowlabel);
        fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
        skb_dst_set_noref(skb, dst);
 
        /* Restore final destination back after routing done */
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       fl6.daddr = np->daddr;
 
        res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
        rcu_read_unlock();
index 93718f3db79b3bcacd7bef609978175c68211db3..b82bcde53f7a02ddceb2a015a7e54d2fc52373e7 100644 (file)
@@ -190,7 +190,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
        struct fib6_table *table;
 
        table = kzalloc(sizeof(*table), GFP_ATOMIC);
-       if (table != NULL) {
+       if (table) {
                table->tb6_id = id;
                table->tb6_root.leaf = net->ipv6.ip6_null_entry;
                table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
@@ -210,7 +210,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id)
                return tb;
 
        tb = fib6_alloc_table(net, id);
-       if (tb != NULL)
+       if (tb)
                fib6_link_table(net, tb);
 
        return tb;
@@ -367,7 +367,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        s_e = cb->args[1];
 
        w = (void *)cb->args[2];
-       if (w == NULL) {
+       if (!w) {
                /* New dump:
                 *
                 * 1. hook callback destructor.
@@ -379,7 +379,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                 * 2. allocate and initialize walker.
                 */
                w = kzalloc(sizeof(*w), GFP_ATOMIC);
-               if (w == NULL)
+               if (!w)
                        return -ENOMEM;
                w->func = fib6_dump_node;
                cb->args[2] = (long)w;
@@ -425,7 +425,8 @@ out:
 
 static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                                     int addrlen, int plen,
-                                    int offset)
+                                    int offset, int allow_create,
+                                    int replace_required)
 {
        struct fib6_node *fn, *in, *ln;
        struct fib6_node *pn = NULL;
@@ -447,8 +448,18 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                 *      Prefix match
                 */
                if (plen < fn->fn_bit ||
-                   !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
+                   !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
+                       if (!allow_create) {
+                               if (replace_required) {
+                                       pr_warn("IPv6: Can't replace route, "
+                                               "no match found\n");
+                                       return ERR_PTR(-ENOENT);
+                               }
+                               pr_warn("IPv6: NLM_F_CREATE should be set "
+                                       "when creating new route\n");
+                       }
                        goto insert_above;
+               }
 
                /*
                 *      Exact match ?
@@ -456,7 +467,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 
                if (plen == fn->fn_bit) {
                        /* clean up an intermediate node */
-                       if ((fn->fn_flags & RTN_RTINFO) == 0) {
+                       if (!(fn->fn_flags & RTN_RTINFO)) {
                                rt6_release(fn->leaf);
                                fn->leaf = NULL;
                        }
@@ -477,6 +488,23 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                fn = dir ? fn->right: fn->left;
        } while (fn);
 
+       if (!allow_create) {
+               /* We should not create new node because
+                * NLM_F_REPLACE was specified without NLM_F_CREATE
+                * I assume it is safe to require NLM_F_CREATE when
+                * REPLACE flag is used! Later we may want to remove the
+                * check for replace_required, because according
+                * to netlink specification, NLM_F_CREATE
+                * MUST be specified if new route is created.
+                * That would keep IPv6 consistent with IPv4
+                */
+               if (replace_required) {
+                       pr_warn("IPv6: Can't replace route, no match found\n");
+                       return ERR_PTR(-ENOENT);
+               }
+               pr_warn("IPv6: NLM_F_CREATE should be set "
+                       "when creating new route\n");
+       }
        /*
         *      We walked to the bottom of tree.
         *      Create new leaf node without children.
@@ -484,7 +512,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 
        ln = node_alloc();
 
-       if (ln == NULL)
+       if (!ln)
                return NULL;
        ln->fn_bit = plen;
 
@@ -527,7 +555,7 @@ insert_above:
                in = node_alloc();
                ln = node_alloc();
 
-               if (in == NULL || ln == NULL) {
+               if (!in || !ln) {
                        if (in)
                                node_free(in);
                        if (ln)
@@ -581,7 +609,7 @@ insert_above:
 
                ln = node_alloc();
 
-               if (ln == NULL)
+               if (!ln)
                        return NULL;
 
                ln->fn_bit = plen;
@@ -614,10 +642,15 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
+       int replace = (info->nlh &&
+                      (info->nlh->nlmsg_flags & NLM_F_REPLACE));
+       int add = (!info->nlh ||
+                  (info->nlh->nlmsg_flags & NLM_F_CREATE));
+       int found = 0;
 
        ins = &fn->leaf;
 
-       for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
+       for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
                /*
                 *      Search for duplicates
                 */
@@ -626,17 +659,24 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        /*
                         *      Same priority level
                         */
+                       if (info->nlh &&
+                           (info->nlh->nlmsg_flags & NLM_F_EXCL))
+                               return -EEXIST;
+                       if (replace) {
+                               found++;
+                               break;
+                       }
 
-                       if (iter->rt6i_dev == rt->rt6i_dev &&
+                       if (iter->dst.dev == rt->dst.dev &&
                            iter->rt6i_idev == rt->rt6i_idev &&
                            ipv6_addr_equal(&iter->rt6i_gateway,
                                            &rt->rt6i_gateway)) {
-                               if (!(iter->rt6i_flags&RTF_EXPIRES))
+                               if (!(iter->rt6i_flags & RTF_EXPIRES))
                                        return -EEXIST;
-                               iter->rt6i_expires = rt->rt6i_expires;
-                               if (!(rt->rt6i_flags&RTF_EXPIRES)) {
+                               iter->dst.expires = rt->dst.expires;
+                               if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                                        iter->rt6i_flags &= ~RTF_EXPIRES;
-                                       iter->rt6i_expires = 0;
+                                       iter->dst.expires = 0;
                                }
                                return -EEXIST;
                        }
@@ -655,17 +695,40 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
        /*
         *      insert node
         */
+       if (!replace) {
+               if (!add)
+                       pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+
+add:
+               rt->dst.rt6_next = iter;
+               *ins = rt;
+               rt->rt6i_node = fn;
+               atomic_inc(&rt->rt6i_ref);
+               inet6_rt_notify(RTM_NEWROUTE, rt, info);
+               info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
+
+               if (!(fn->fn_flags & RTN_RTINFO)) {
+                       info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+                       fn->fn_flags |= RTN_RTINFO;
+               }
 
-       rt->dst.rt6_next = iter;
-       *ins = rt;
-       rt->rt6i_node = fn;
-       atomic_inc(&rt->rt6i_ref);
-       inet6_rt_notify(RTM_NEWROUTE, rt, info);
-       info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
-
-       if ((fn->fn_flags & RTN_RTINFO) == 0) {
-               info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
-               fn->fn_flags |= RTN_RTINFO;
+       } else {
+               if (!found) {
+                       if (add)
+                               goto add;
+                       pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+                       return -ENOENT;
+               }
+               *ins = rt;
+               rt->rt6i_node = fn;
+               rt->dst.rt6_next = iter->dst.rt6_next;
+               atomic_inc(&rt->rt6i_ref);
+               inet6_rt_notify(RTM_NEWROUTE, rt, info);
+               rt6_release(iter);
+               if (!(fn->fn_flags & RTN_RTINFO)) {
+                       info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+                       fn->fn_flags |= RTN_RTINFO;
+               }
        }
 
        return 0;
@@ -674,7 +737,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
        if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
-           (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
+           (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
                mod_timer(&net->ipv6.ip6_fib_timer,
                          jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
@@ -696,11 +759,28 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 {
        struct fib6_node *fn, *pn = NULL;
        int err = -ENOMEM;
+       int allow_create = 1;
+       int replace_required = 0;
+
+       if (info->nlh) {
+               if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+                       allow_create = 0;
+               if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+                       replace_required = 1;
+       }
+       if (!allow_create && !replace_required)
+               pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
 
        fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
-                       rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
+                       rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
+                       allow_create, replace_required);
+
+       if (IS_ERR(fn)) {
+               err = PTR_ERR(fn);
+               fn = NULL;
+       }
 
-       if (fn == NULL)
+       if (!fn)
                goto out;
 
        pn = fn;
@@ -709,7 +789,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
        if (rt->rt6i_src.plen) {
                struct fib6_node *sn;
 
-               if (fn->subtree == NULL) {
+               if (!fn->subtree) {
                        struct fib6_node *sfn;
 
                        /*
@@ -724,7 +804,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        /* Create subtree root node */
                        sfn = node_alloc();
-                       if (sfn == NULL)
+                       if (!sfn)
                                goto st_failure;
 
                        sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
@@ -736,9 +816,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
                                        sizeof(struct in6_addr), rt->rt6i_src.plen,
-                                       offsetof(struct rt6_info, rt6i_src));
+                                       offsetof(struct rt6_info, rt6i_src),
+                                       allow_create, replace_required);
 
-                       if (sn == NULL) {
+                       if (!sn) {
                                /* If it is failed, discard just allocated
                                   root, and then (in st_failure) stale node
                                   in main tree.
@@ -753,13 +834,18 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                } else {
                        sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
                                        sizeof(struct in6_addr), rt->rt6i_src.plen,
-                                       offsetof(struct rt6_info, rt6i_src));
+                                       offsetof(struct rt6_info, rt6i_src),
+                                       allow_create, replace_required);
 
-                       if (sn == NULL)
+                       if (IS_ERR(sn)) {
+                               err = PTR_ERR(sn);
+                               sn = NULL;
+                       }
+                       if (!sn)
                                goto st_failure;
                }
 
-               if (fn->leaf == NULL) {
+               if (!fn->leaf) {
                        fn->leaf = rt;
                        atomic_inc(&rt->rt6i_ref);
                }
@@ -768,10 +854,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 #endif
 
        err = fib6_add_rt2node(fn, rt, info);
-
-       if (err == 0) {
+       if (!err) {
                fib6_start_gc(info->nl_net, rt);
-               if (!(rt->rt6i_flags&RTF_CACHE))
+               if (!(rt->rt6i_flags & RTF_CACHE))
                        fib6_prune_clones(info->nl_net, pn, rt);
        }
 
@@ -819,7 +904,7 @@ st_failure:
  */
 
 struct lookup_args {
-       int             offset;         /* key offset on rt6_info       */
+       int                     offset;         /* key offset on rt6_info       */
        const struct in6_addr   *addr;          /* search key                   */
 };
 
@@ -849,11 +934,10 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
                        fn = next;
                        continue;
                }
-
                break;
        }
 
-       while(fn) {
+       while (fn) {
                if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
                        struct rt6key *key;
 
@@ -900,8 +984,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *da
        };
 
        fn = fib6_lookup_1(root, daddr ? args : args + 1);
-
-       if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
+       if (!fn || fn->fn_flags & RTN_TL_ROOT)
                fn = root;
 
        return fn;
@@ -961,7 +1044,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
        }
 #endif
 
-       if (fn && fn->fn_flags&RTN_RTINFO)
+       if (fn && fn->fn_flags & RTN_RTINFO)
                return fn;
 
        return NULL;
@@ -975,14 +1058,13 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
 
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
 {
-       if (fn->fn_flags&RTN_ROOT)
+       if (fn->fn_flags & RTN_ROOT)
                return net->ipv6.ip6_null_entry;
 
-       while(fn) {
-               if(fn->left)
+       while (fn) {
+               if (fn->left)
                        return fn->left->leaf;
-
-               if(fn->right)
+               if (fn->right)
                        return fn->right->leaf;
 
                fn = FIB6_SUBTREE(fn);
@@ -1020,12 +1102,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                if (children == 3 || FIB6_SUBTREE(fn)
 #ifdef CONFIG_IPV6_SUBTREES
                    /* Subtree root (i.e. fn) may have one child */
-                   || (children && fn->fn_flags&RTN_ROOT)
+                   || (children && fn->fn_flags & RTN_ROOT)
 #endif
                    ) {
                        fn->leaf = fib6_find_prefix(net, fn);
 #if RT6_DEBUG >= 2
-                       if (fn->leaf==NULL) {
+                       if (!fn->leaf) {
                                WARN_ON(!fn->leaf);
                                fn->leaf = net->ipv6.ip6_null_entry;
                        }
@@ -1058,7 +1140,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 
                read_lock(&fib6_walker_lock);
                FOR_WALKERS(w) {
-                       if (child == NULL) {
+                       if (!child) {
                                if (w->root == fn) {
                                        w->root = w->node = NULL;
                                        RT6_TRACE("W %p adjusted by delroot 1\n", w);
@@ -1087,7 +1169,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                read_unlock(&fib6_walker_lock);
 
                node_free(fn);
-               if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
+               if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
                        return pn;
 
                rt6_release(pn->leaf);
@@ -1121,7 +1203,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                if (w->state == FWS_C && w->leaf == rt) {
                        RT6_TRACE("walker %p adjusted by delroute\n", w);
                        w->leaf = rt->dst.rt6_next;
-                       if (w->leaf == NULL)
+                       if (!w->leaf)
                                w->state = FWS_U;
                }
        }
@@ -1130,7 +1212,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
        rt->dst.rt6_next = NULL;
 
        /* If it was last route, expunge its radix tree node */
-       if (fn->leaf == NULL) {
+       if (!fn->leaf) {
                fn->fn_flags &= ~RTN_RTINFO;
                net->ipv6.rt6_stats->fib_route_nodes--;
                fn = fib6_repair_tree(net, fn);
@@ -1144,7 +1226,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                 * to still alive ones.
                 */
                while (fn) {
-                       if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
+                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
                                fn->leaf = fib6_find_prefix(net, fn);
                                atomic_inc(&fn->leaf->rt6i_ref);
                                rt6_release(rt);
@@ -1171,17 +1253,17 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
                return -ENOENT;
        }
 #endif
-       if (fn == NULL || rt == net->ipv6.ip6_null_entry)
+       if (!fn || rt == net->ipv6.ip6_null_entry)
                return -ENOENT;
 
        WARN_ON(!(fn->fn_flags & RTN_RTINFO));
 
-       if (!(rt->rt6i_flags&RTF_CACHE)) {
+       if (!(rt->rt6i_flags & RTF_CACHE)) {
                struct fib6_node *pn = fn;
 #ifdef CONFIG_IPV6_SUBTREES
                /* clones of this route might be in another subtree */
                if (rt->rt6i_src.plen) {
-                       while (!(pn->fn_flags&RTN_ROOT))
+                       while (!(pn->fn_flags & RTN_ROOT))
                                pn = pn->parent;
                        pn = pn->parent;
                }
@@ -1232,11 +1314,11 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 
        for (;;) {
                fn = w->node;
-               if (fn == NULL)
+               if (!fn)
                        return 0;
 
                if (w->prune && fn != w->root &&
-                   fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
+                   fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
                        w->state = FWS_C;
                        w->leaf = fn->leaf;
                }
@@ -1265,7 +1347,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                        w->state = FWS_C;
                        w->leaf = fn->leaf;
                case FWS_C:
-                       if (w->leaf && fn->fn_flags&RTN_RTINFO) {
+                       if (w->leaf && fn->fn_flags & RTN_RTINFO) {
                                int err;
 
                                if (w->count < w->skip) {
@@ -1380,6 +1462,26 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
        fib6_walk(&c.w);
 }
 
+void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
+                   int prune, void *arg)
+{
+       struct fib6_table *table;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       unsigned int h;
+
+       rcu_read_lock();
+       for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+               head = &net->ipv6.fib_table_hash[h];
+               hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+                       read_lock_bh(&table->tb6_lock);
+                       fib6_clean_tree(net, &table->tb6_root,
+                                       func, prune, arg);
+                       read_unlock_bh(&table->tb6_lock);
+               }
+       }
+       rcu_read_unlock();
+}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg)
 {
@@ -1439,8 +1541,8 @@ static int fib6_age(struct rt6_info *rt, void *arg)
         *      only if they are not in use now.
         */
 
-       if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
-               if (time_after(now, rt->rt6i_expires)) {
+       if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
+               if (time_after(now, rt->dst.expires)) {
                        RT6_TRACE("expiring %p\n", rt);
                        return -1;
                }
@@ -1451,7 +1553,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
                } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-                          (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
+                          (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        return -1;
index 4566dbd916d36a346bdf49b0f1b2b9e5c4e866e3..b7867a1215b1758a8b99b4f3261519f991a8986d 100644 (file)
@@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                err = -EINVAL;
                goto done;
        }
-       ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+       fl->dst = freq->flr_dst;
        atomic_set(&fl->users, 1);
        switch (fl->share) {
        case IPV6_FL_S_EXCL:
index 027c7ff6f1e5370bc0e84d8e3daeb996875a431c..1ca5d45a12e8973408ececb5ebd4e1daa3e391f9 100644 (file)
@@ -111,6 +111,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
            ipv6_addr_loopback(&hdr->daddr))
                goto err;
 
+       /*
+        * RFC4291 2.7
+        * Multicast addresses must not be used as source addresses in IPv6
+        * packets or appear in any Routing header.
+        */
+       if (ipv6_addr_is_multicast(&hdr->saddr))
+               goto err;
+
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
@@ -272,6 +280,7 @@ int ip6_mc_input(struct sk_buff *skb)
                        u8 *ptr = skb_network_header(skb) + opt->ra;
                        struct icmp6hdr *icmp6;
                        u8 nexthdr = hdr->nexthdr;
+                       __be16 frag_off;
                        int offset;
 
                        /* Check if the value of Router Alert
@@ -285,7 +294,7 @@ int ip6_mc_input(struct sk_buff *skb)
                                        goto out;
                                }
                                offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
-                                                         &nexthdr);
+                                                         &nexthdr, &frag_off);
                                if (offset < 0)
                                        goto out;
 
index 84d0bd5cac939814edaed4379f09464a958d61bf..d97e07183ce9c12123ece97e954004b912c88b30 100644 (file)
@@ -136,7 +136,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
@@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;
 
-       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-       ipv6_addr_copy(&hdr->daddr, first_hop);
+       hdr->saddr = fl6->saddr;
+       hdr->daddr = *first_hop;
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
@@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
        hdr->nexthdr = proto;
        hdr->hop_limit = np->hop_limit;
 
-       ipv6_addr_copy(&hdr->saddr, saddr);
-       ipv6_addr_copy(&hdr->daddr, daddr);
+       hdr->saddr = *saddr;
+       hdr->daddr = *daddr;
 
        return 0;
 }
@@ -329,10 +329,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
 {
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        u8 nexthdr = hdr->nexthdr;
+       __be16 frag_off;
        int offset;
 
        if (ipv6_ext_hdr(nexthdr)) {
-               offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
+               offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
                if (offset < 0)
                        return 0;
        } else
@@ -462,7 +463,7 @@ int ip6_forward(struct sk_buff *skb)
           send redirects to source routed frames.
           We don't send redirects to frames decapsulated from IPsec.
         */
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
@@ -603,7 +604,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
@@ -631,6 +632,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
+       int hroom, troom;
        __be32 frag_id = 0;
        int ptr, offset = 0, err=0;
        u8 *prevhdr, nexthdr = 0;
@@ -797,6 +799,8 @@ slow_path:
         */
 
        *prevhdr = NEXTHDR_FRAGMENT;
+       hroom = LL_RESERVED_SPACE(rt->dst.dev);
+       troom = rt->dst.dev->needed_tailroom;
 
        /*
         *      Keep copying data until we run out.
@@ -815,7 +819,8 @@ slow_path:
                 *      Allocate buffer.
                 */
 
-               if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
+               if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+                                     hroom + troom, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
@@ -828,7 +833,7 @@ slow_path:
                 */
 
                ip6_copy_metadata(frag, skb);
-               skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
+               skb_reserve(frag, hroom);
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                skb_reset_network_header(frag);
                fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -978,7 +983,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry of the nexthop router
         */
        rcu_read_lock();
-       n = dst_get_neighbour(*dst);
+       n = dst_get_neighbour_noref(*dst);
        if (n && !(n->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
@@ -1059,7 +1064,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (err)
                return ERR_PTR(err);
        if (final_dst)
-               ipv6_addr_copy(&fl6->daddr, final_dst);
+               fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1095,7 +1100,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (err)
                return ERR_PTR(err);
        if (final_dst)
-               ipv6_addr_copy(&fl6->daddr, final_dst);
+               fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1588,7 +1593,7 @@ int ip6_push_pending_frames(struct sock *sk)
        if (np->pmtudisc < IPV6_PMTUDISC_DO)
                skb->local_df = 1;
 
-       ipv6_addr_copy(final_dst, &fl6->daddr);
+       *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
@@ -1604,8 +1609,8 @@ int ip6_push_pending_frames(struct sock *sk)
 
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
-       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-       ipv6_addr_copy(&hdr->daddr, final_dst);
+       hdr->saddr = fl6->saddr;
+       hdr->daddr = *final_dst;
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
index bdc15c9003d781fbdb3d0288cf8b88aa84f71742..e1f7761815f36903687dd45f0da702c86ec4f159 100644 (file)
@@ -93,7 +93,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
@@ -289,6 +289,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
        if ((err = register_netdevice(dev)) < 0)
                goto failed_free;
 
+       strcpy(t->parms.name, dev->name);
+
        dev_hold(dev);
        ip6_tnl_link(ip6n, t);
        return t;
@@ -651,8 +653,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
                                NULL, 0, 0);
 
-               if (rt && rt->rt6i_dev)
-                       skb2->dev = rt->rt6i_dev;
+               if (rt && rt->dst.dev)
+                       skb2->dev = rt->dst.dev;
 
                icmpv6_send(skb2, rel_type, rel_code, rel_info);
 
@@ -977,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
-       ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
-       ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
+       ipv6h->saddr = fl6->saddr;
+       ipv6h->daddr = fl6->daddr;
        nf_reset(skb);
        pkt_len = skb->len;
        err = ip6_local_out(skb);
@@ -1153,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
        /* Set up flowi template */
-       ipv6_addr_copy(&fl6->saddr, &p->laddr);
-       ipv6_addr_copy(&fl6->daddr, &p->raddr);
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;
        fl6->flowlabel = 0;
 
@@ -1183,11 +1185,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
                if (rt == NULL)
                        return;
 
-               if (rt->rt6i_dev) {
-                       dev->hard_header_len = rt->rt6i_dev->hard_header_len +
+               if (rt->dst.dev) {
+                       dev->hard_header_len = rt->dst.dev->hard_header_len +
                                sizeof (struct ipv6hdr);
 
-                       dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+                       dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
                        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                                dev->mtu-=8;
 
@@ -1210,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 static int
 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
 {
-       ipv6_addr_copy(&t->parms.laddr, &p->laddr);
-       ipv6_addr_copy(&t->parms.raddr, &p->raddr);
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
        t->parms.flags = p->flags;
        t->parms.hop_limit = p->hop_limit;
        t->parms.encap_limit = p->encap_limit;
@@ -1407,7 +1409,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
 
        t->dev = dev;
-       strcpy(t->parms.name, dev->name);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
@@ -1487,6 +1488,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
 static int __net_init ip6_tnl_init_net(struct net *net)
 {
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+       struct ip6_tnl *t = NULL;
        int err;
 
        ip6n->tnls[0] = ip6n->tnls_wc;
@@ -1507,6 +1509,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        err = register_netdev(ip6n->fb_tnl_dev);
        if (err < 0)
                goto err_register;
+
+       t = netdev_priv(ip6n->fb_tnl_dev);
+
+       strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
        return 0;
 
 err_register:
index 449a9185b8f228531ae0e10949fa755b2e3b655a..c7e95c8c579f6e71848e3857d18392e5983199c3 100644 (file)
@@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
                msg->im6_msgtype = MRT6MSG_WHOLEPKT;
                msg->im6_mif = mrt->mroute_reg_vif_num;
                msg->im6_pad = 0;
-               ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-               ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+               msg->im6_src = ipv6_hdr(pkt)->saddr;
+               msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
@@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
        msg->im6_msgtype = assert;
        msg->im6_mif = mifi;
        msg->im6_pad = 0;
-       ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-       ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+       msg->im6_src = ipv6_hdr(pkt)->saddr;
+       msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net,
                iph->payload_len = 0;
                iph->nexthdr = IPPROTO_NONE;
                iph->hop_limit = 0;
-               ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
-               ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
+               iph->saddr = rt->rt6i_src.addr;
+               iph->daddr = rt->rt6i_dst.addr;
 
                err = ip6mr_cache_unresolved(mrt, vif, skb2);
                read_unlock(&mrt_lock);
index c99e3ee9781f246f82185e8a0e246e02a258cf82..18a2719003c378e95a02dacf5f71023380244739 100644 (file)
@@ -435,7 +435,7 @@ sticky_done:
                        goto e_inval;
 
                np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
-               ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
+               np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
                retv = 0;
                break;
        }
@@ -503,7 +503,7 @@ done:
                        goto e_inval;
                if (val > 255 || val < -1)
                        goto e_inval;
-               np->mcast_hops = val;
+               np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
                retv = 0;
                break;
 
@@ -980,8 +980,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-                                       ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
@@ -992,8 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-                                       ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
index ee7839f4d6e3450c222f2b2677d3b9cf0709d21e..b853f06cc14874759e303041426b6adc14e19abc 100644 (file)
@@ -155,14 +155,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
                return -ENOMEM;
 
        mc_lst->next = NULL;
-       ipv6_addr_copy(&mc_lst->addr, addr);
+       mc_lst->addr = *addr;
 
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
                rt = rt6_lookup(net, addr, NULL, 0, 0);
                if (rt) {
-                       dev = rt->rt6i_dev;
+                       dev = rt->dst.dev;
                        dst_release(&rt->dst);
                }
        } else
@@ -256,7 +256,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
                struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
 
                if (rt) {
-                       dev = rt->rt6i_dev;
+                       dev = rt->dst.dev;
                        dev_hold(dev);
                        dst_release(&rt->dst);
                }
@@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 
        setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
 
-       ipv6_addr_copy(&mc->mca_addr, addr);
+       mc->mca_addr = *addr;
        mc->idev = idev; /* (reference taken) */
        mc->mca_users = 1;
        /* mca_stamp should be updated upon changes */
@@ -1343,13 +1343,15 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
        struct mld2_report *pmr;
        struct in6_addr addr_buf;
        const struct in6_addr *saddr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int err;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
                     IPV6_TLV_PADN, 0 };
 
        /* we assume size > sizeof(ra) here */
-       size += LL_ALLOCATED_SPACE(dev);
+       size += hlen + tlen;
        /* limit our allocations to order-0 page */
        size = min_t(int, size, SKB_MAX_ORDER(0, 0));
        skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1357,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
@@ -1408,18 +1410,11 @@ static void mld_sendpack(struct sk_buff *skb)
                                           csum_partial(skb_transport_header(skb),
                                                        mldlen, 0));
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
 
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        err = 0;
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
@@ -1723,6 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        struct mld_msg *hdr;
        const struct in6_addr *snd_addr, *saddr;
        struct in6_addr addr_buf;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int err, len, payload_len, full_len;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1744,7 +1741,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                      IPSTATS_MIB_OUT, full_len);
        rcu_read_unlock();
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
+       skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
 
        if (skb == NULL) {
                rcu_read_lock();
@@ -1754,7 +1751,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                return;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
@@ -1772,7 +1769,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
        memset(hdr, 0, sizeof(struct mld_msg));
        hdr->mld_type = type;
-       ipv6_addr_copy(&hdr->mld_mca, addr);
+       hdr->mld_mca = *addr;
 
        hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
                                         IPPROTO_ICMPV6,
@@ -1781,17 +1778,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        rcu_read_lock();
        idev = __in6_dev_get(skb->dev);
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(sk, &fl6, type,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto err_out;
@@ -1914,7 +1904,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  * Add multicast single-source filter to the interface list
  */
 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
-       const struct in6_addr *psfsrc, int delta)
+       const struct in6_addr *psfsrc)
 {
        struct ip6_sf_list *psf, *psf_prev;
 
@@ -2045,7 +2035,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                pmc->mca_sfcount[sfmode]++;
        err = 0;
        for (i=0; i<sfcount; i++) {
-               err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+               err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
                if (err)
                        break;
        }
index 43242e6e610301c2a662932f3f910ab6ed6e573d..7e1e0fbfef21f1f17f076d2445e62e5d2cd303af 100644 (file)
@@ -195,8 +195,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
                mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
                mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
                mip6_report_rl.iif = iif;
-               ipv6_addr_copy(&mip6_report_rl.src, src);
-               ipv6_addr_copy(&mip6_report_rl.dst, dst);
+               mip6_report_rl.src = *src;
+               mip6_report_rl.dst = *dst;
                allow = 1;
        }
        spin_unlock_bh(&mip6_report_rl.lock);
index 44e5b7f2a6c1badcbf4dbb5ed2a844ae2daa199c..d8f02ef88e59b265322890025aa22e51815abb3a 100644 (file)
@@ -93,7 +93,7 @@
 
 static u32 ndisc_hash(const void *pkey,
                      const struct net_device *dev,
-                     __u32 rnd);
+                     __u32 *hash_rnd);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -126,7 +126,6 @@ static const struct neigh_ops ndisc_direct_ops = {
 
 struct neigh_table nd_tbl = {
        .family =       AF_INET6,
-       .entry_size =   sizeof(struct neighbour) + sizeof(struct in6_addr),
        .key_len =      sizeof(struct in6_addr),
        .hash =         ndisc_hash,
        .constructor =  ndisc_constructor,
@@ -141,7 +140,7 @@ struct neigh_table nd_tbl = {
                .gc_staletime           = 60 * HZ,
                .reachable_time         = ND_REACHABLE_TIME,
                .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
+               .queue_len_bytes        = 64*1024,
                .ucast_probes           = 3,
                .mcast_probes           = 3,
                .anycast_delay          = 1 * HZ,
@@ -350,16 +349,9 @@ EXPORT_SYMBOL(ndisc_mc_map);
 
 static u32 ndisc_hash(const void *pkey,
                      const struct net_device *dev,
-                     __u32 hash_rnd)
+                     __u32 *hash_rnd)
 {
-       const u32 *p32 = pkey;
-       u32 addr_hash, i;
-
-       addr_hash = 0;
-       for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
-               addr_hash ^= *p32++;
-
-       return jhash_2words(addr_hash, dev->ifindex, hash_rnd);
+       return ndisc_hashfn(pkey, dev, hash_rnd);
 }
 
 static int ndisc_constructor(struct neighbour *neigh)
@@ -446,6 +438,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
        struct sock *sk = net->ipv6.ndisc_sk;
        struct sk_buff *skb;
        struct icmp6hdr *hdr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int len;
        int err;
        u8 *opt;
@@ -459,7 +453,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
 
        skb = sock_alloc_send_skb(sk,
                                  (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                  len + LL_ALLOCATED_SPACE(dev)),
+                                  len + hlen + tlen),
                                  1, &err);
        if (!skb) {
                ND_PRINTK0(KERN_ERR
@@ -468,7 +462,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
                return NULL;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
 
        skb->transport_header = skb->tail;
@@ -479,7 +473,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
 
        opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
        if (target) {
-               ipv6_addr_copy((struct in6_addr *)opt, target);
+               *(struct in6_addr *)opt = *target;
                opt += sizeof(*target);
        }
 
@@ -515,14 +509,7 @@ void ndisc_send_skb(struct sk_buff *skb,
        type = icmp6h->icmp6_type;
 
        icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
-
-       dst = icmp6_dst_alloc(dev, neigh, daddr);
-       if (!dst) {
-               kfree_skb(skb);
-               return;
-       }
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(dev, neigh, &fl6);
        if (IS_ERR(dst)) {
                kfree_skb(skb);
                return;
@@ -1237,7 +1224,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
        if (rt)
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
 
        if (rt && lifetime == 0) {
                neigh_clone(neigh);
@@ -1257,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                        return;
                }
 
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
                if (neigh == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1271,7 +1258,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        }
 
        if (rt)
-               rt->rt6i_expires = jiffies + (HZ * lifetime);
+               rt->dst.expires = jiffies + (HZ * lifetime);
 
        if (ra_msg->icmph.icmp6_hop_limit) {
                in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
@@ -1381,7 +1368,9 @@ skip_routeinfo:
                for (p = ndopts.nd_opts_pi;
                     p;
                     p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
-                       addrconf_prefix_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3);
+                       addrconf_prefix_rcv(skb->dev, (u8 *)p,
+                                           (p->nd_opt_len) << 3,
+                                           ndopts.nd_opts_src_lladdr != NULL);
                }
        }
 
@@ -1533,6 +1522,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        struct inet6_dev *idev;
        struct flowi6 fl6;
        u8 *opt;
+       int hlen, tlen;
        int rd_len;
        int err;
        u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
@@ -1571,7 +1561,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        }
        if (!rt->rt6i_peer)
                rt6_bind_peer(rt, 1);
-       if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+       if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                goto release;
 
        if (dev->addr_len) {
@@ -1590,9 +1580,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        rd_len &= ~0x7;
        len += rd_len;
 
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
        buff = sock_alloc_send_skb(sk,
                                   (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                   len + LL_ALLOCATED_SPACE(dev)),
+                                   len + hlen + tlen),
                                   1, &err);
        if (buff == NULL) {
                ND_PRINTK0(KERN_ERR
@@ -1601,7 +1593,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                goto release;
        }
 
-       skb_reserve(buff, LL_RESERVED_SPACE(dev));
+       skb_reserve(buff, hlen);
        ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
                   IPPROTO_ICMPV6, len);
 
@@ -1617,9 +1609,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
         */
 
        addrp = (struct in6_addr *)(icmph + 1);
-       ipv6_addr_copy(addrp, target);
+       *addrp = *target;
        addrp++;
-       ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
+       *addrp = ipv6_hdr(skb)->daddr;
 
        opt = (u8*) (addrp + 1);
 
index 448464844a253474fed5624f75c68c9bb33cd98e..9a68fb5b9e77f47a2d8fa89d4d234edff805526b 100644 (file)
@@ -125,6 +125,16 @@ config IP6_NF_MATCH_MH
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_MATCH_RPFILTER
+       tristate '"rpfilter" reverse path filter match support'
+       depends on NETFILTER_ADVANCED
+       ---help---
+         This option allows you to match packets whose replies would
+         go out via the interface the packet came in.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+         The module will be called ip6t_rpfilter.
+
 config IP6_NF_MATCH_RT
        tristate '"rt" Routing header match support'
        depends on NETFILTER_ADVANCED
@@ -186,7 +196,6 @@ config IP6_NF_MANGLE
 
 config IP6_NF_RAW
        tristate  'raw table support (required for TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to ip6tables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index abfee91ce816a09ef88c065b4fb66643713a46c9..2eaed96db02c4fe203efedad2a2b6c20350781ff 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
 obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
+obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 
 # targets
index e63c3972a739eef92baef9897e6d4618670ec9b5..fb80a23c6640a15bd3ee32ef2c29884dea9b0f3d 100644 (file)
@@ -405,6 +405,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
        int status, type, pid, flags;
        unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
+       bool enable_timestamp = false;
 
        skblen = skb->len;
        if (skblen < sizeof(*nlh))
@@ -442,11 +443,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
-               net_enable_timestamp();
+               enable_timestamp = true;
                peer_pid = pid;
        }
 
        spin_unlock_bh(&queue_lock);
+       if (enable_timestamp)
+               net_enable_timestamp();
 
        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
index a5a4c5dd53961cf1b2432c8317592d1099eafda2..aad2fa41cf460041578f06b4eff7b6d6f3417054 100644 (file)
@@ -49,6 +49,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        const __u8 tclass = DEFAULT_TOS_VALUE;
        struct dst_entry *dst = NULL;
        u8 proto;
+       __be16 frag_off;
        struct flowi6 fl6;
 
        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        }
 
        proto = oip6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
+       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
 
        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
                pr_debug("Cannot get TCP header.\n");
@@ -93,8 +94,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
-       ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+       fl6.saddr = oip6h->daddr;
+       fl6.daddr = oip6h->saddr;
        fl6.fl6_sport = otcph.dest;
        fl6.fl6_dport = otcph.source;
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -129,8 +130,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        *(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
        ip6h->hop_limit = ip6_dst_hoplimit(dst);
        ip6h->nexthdr = IPPROTO_TCP;
-       ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
-       ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
+       ip6h->saddr = oip6h->daddr;
+       ip6h->daddr = oip6h->saddr;
 
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
        /* Truncate to length (no data) */
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
new file mode 100644 (file)
index 0000000..5d1d8b0
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match");
+
+static bool rpfilter_addr_unicast(const struct in6_addr *addr)
+{
+       int addr_type = ipv6_addr_type(addr);
+       return addr_type & IPV6_ADDR_UNICAST;
+}
+
+static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
+                                    const struct net_device *dev, u8 flags)
+{
+       struct rt6_info *rt;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       bool ret = false;
+       struct flowi6 fl6 = {
+               .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
+               .flowi6_proto = iph->nexthdr,
+               .daddr = iph->saddr,
+       };
+       int lookup_flags;
+
+       if (rpfilter_addr_unicast(&iph->daddr)) {
+               memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr));
+               lookup_flags = RT6_LOOKUP_F_HAS_SADDR;
+       } else {
+               lookup_flags = 0;
+       }
+
+       fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+       if ((flags & XT_RPFILTER_LOOSE) == 0) {
+               fl6.flowi6_oif = dev->ifindex;
+               lookup_flags |= RT6_LOOKUP_F_IFACE;
+       }
+
+       rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags);
+       if (rt->dst.error)
+               goto out;
+
+       if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
+               goto out;
+
+       if (rt->rt6i_flags & RTF_LOCAL) {
+               ret = flags & XT_RPFILTER_ACCEPT_LOCAL;
+               goto out;
+       }
+
+       if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+               ret = true;
+ out:
+       dst_release(&rt->dst);
+       return ret;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_rpfilter_info *info = par->matchinfo;
+       int saddrtype;
+       struct ipv6hdr *iph;
+       bool invert = info->flags & XT_RPFILTER_INVERT;
+
+       if (par->in->flags & IFF_LOOPBACK)
+               return true ^ invert;
+
+       iph = ipv6_hdr(skb);
+       saddrtype = ipv6_addr_type(&iph->saddr);
+       if (unlikely(saddrtype == IPV6_ADDR_ANY))
+               return true ^ invert; /* not routable: forward path will drop it */
+
+       return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+       const struct xt_rpfilter_info *info = par->matchinfo;
+       unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+
+       if (info->flags & options) {
+               pr_info("unknown options encountered");
+               return -EINVAL;
+       }
+
+       if (strcmp(par->table, "mangle") != 0 &&
+           strcmp(par->table, "raw") != 0) {
+               pr_info("match only valid in the \'raw\' "
+                       "or \'mangle\' tables, not \'%s\'.\n", par->table);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+       .name           = "rpfilter",
+       .family         = NFPROTO_IPV6,
+       .checkentry     = rpfilter_check,
+       .match          = rpfilter_mt,
+       .matchsize      = sizeof(struct xt_rpfilter_info),
+       .hooks          = (1 << NF_INET_PRE_ROUTING),
+       .me             = THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+       return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+       xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
index c9e37c8fd62c97ed0e2d3058f177a38b25a79348..a8f6da97e3b252368469065ce0159295eb00e96b 100644 (file)
@@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init ip6table_filter_net_init(struct net *net)
index 1008ce94bc339e5a97405723170ea63d4fed6ba6..fdeb6d03da812d136874fccc870b90c69efee7b1 100644 (file)
@@ -142,11 +142,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
        SNMP_MIB_SENTINEL
 };
 
-/* can be called either with percpu mib (pcpumib != NULL),
- * or shared one (smib != NULL)
- */
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
-                                    atomic_long_t *smib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 {
        char name[32];
        int i;
@@ -163,14 +159,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpum
                snprintf(name, sizeof(name), "Icmp6%s%s",
                        i & 0x100 ? "Out" : "In", p);
                seq_printf(seq, "%-32s\t%lu\n", name,
-                       pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
+                          atomic_long_read(smib + i));
        }
 
        /* print by number (nonzero only) - ICMPMsgStat format */
        for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
                unsigned long val;
 
-               val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
+               val = atomic_long_read(smib + i);
                if (!val)
                        continue;
                snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -215,8 +211,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
                            NULL, snmp6_icmp6_list);
-       snmp6_seq_show_icmpv6msg(seq,
-                           (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
+       snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
                            NULL, snmp6_udp6_list);
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
@@ -246,7 +241,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
                            snmp6_ipstats_list);
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
-       snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
+       snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
        return 0;
 }
 
index 331af3b882ac211ca65cb59042f14c381fe2d244..a4894f4f1944e600d93a1f2a9cde719cfe528bb0 100644 (file)
@@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+       np->rcv_saddr = addr->sin6_addr;
        if (!(addr_type & IPV6_ADDR_MULTICAST))
-               ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+               np->saddr = addr->sin6_addr;
        err = 0;
 out_unlock:
        rcu_read_unlock();
@@ -383,7 +383,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
 
        /* Charge it to the socket. */
-       if (ip_queue_rcv_skb(sk, skb) < 0) {
+       skb_dst_drop(skb);
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -494,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        if (sin6) {
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = 0;
-               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+               sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                sin6->sin6_flowinfo = 0;
                sin6->sin6_scope_id = 0;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -610,6 +611,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        struct sk_buff *skb;
        int err;
        struct rt6_info *rt = (struct rt6_info *)*dstp;
+       int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+       int tlen = rt->dst.dev->needed_tailroom;
 
        if (length > rt->dst.dev->mtu) {
                ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
@@ -619,11 +622,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
                goto out;
 
        skb = sock_alloc_send_skb(sk,
-                                 length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+                                 length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto error;
-       skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+       skb_reserve(skb, hlen);
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
@@ -843,11 +846,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                goto out;
 
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl6.daddr, daddr);
+               fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.saddr = np->saddr;
 
        final_p = fl6_update_dst(&fl6, opt, &final);
 
index dfb164e9051aef20af12cba8072dae4dfdb058e5..b69fae76a6f1c18fa326a828b7b47fa8ade3b94c 100644 (file)
@@ -153,8 +153,8 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 
        fq->id = arg->id;
        fq->user = arg->user;
-       ipv6_addr_copy(&fq->saddr, arg->src);
-       ipv6_addr_copy(&fq->daddr, arg->dst);
+       fq->saddr = *arg->src;
+       fq->daddr = *arg->dst;
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
index 8473016bba4a8cd6ae4dfc420c7827524fc3a84b..07361dfa80852cbbe4db66027f8da5ef13ade4c1 100644 (file)
 #include <linux/sysctl.h>
 #endif
 
-/* Set to 3 to get tracing. */
-#define RT6_DEBUG 2
-
-#if RT6_DEBUG >= 3
-#define RDBG(x) printk x
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
-#else
-#define RDBG(x)
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                    const struct in6_addr *dest);
 static struct dst_entry        *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int     ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int     ip6_mtu(const struct dst_entry *dst);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void            ip6_dst_destroy(struct dst_entry *);
 static void            ip6_dst_ifdown(struct dst_entry *,
@@ -134,7 +123,23 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 
 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-       return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev);
+       struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
+       if (n)
+               return n;
+       return neigh_create(&nd_tbl, daddr, dst->dev);
+}
+
+static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
+{
+       struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
+       if (!n) {
+               n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
+               if (IS_ERR(n))
+                       return PTR_ERR(n);
+       }
+       dst_set_neighbour(&rt->dst, n);
+
+       return 0;
 }
 
 static struct dst_ops ip6_dst_ops_template = {
@@ -144,7 +149,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
-       .default_mtu            =       ip6_default_mtu,
+       .mtu                    =       ip6_mtu,
        .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
@@ -155,9 +160,11 @@ static struct dst_ops ip6_dst_ops_template = {
        .neigh_lookup           =       ip6_neigh_lookup,
 };
 
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +182,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
-       .default_mtu            =       ip6_blackhole_default_mtu,
+       .mtu                    =       ip6_blackhole_mtu,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
@@ -245,9 +252,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
 {
        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
-       if (rt != NULL)
+       if (rt)
                memset(&rt->rt6i_table, 0,
-                       sizeof(*rt) - sizeof(struct dst_entry));
+                      sizeof(*rt) - sizeof(struct dst_entry));
 
        return rt;
 }
@@ -261,7 +268,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        if (!(rt->dst.flags & DST_HOST))
                dst_destroy_metrics_generic(dst);
 
-       if (idev != NULL) {
+       if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
@@ -297,10 +304,10 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;
 
-       if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
+       if (dev != loopback_dev && idev && idev->dev == dev) {
                struct inet6_dev *loopback_idev =
                        in6_dev_get(loopback_dev);
-               if (loopback_idev != NULL) {
+               if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
@@ -310,7 +317,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
 {
        return (rt->rt6i_flags & RTF_EXPIRES) &&
-               time_after(jiffies, rt->rt6i_expires);
+               time_after(jiffies, rt->dst.expires);
 }
 
 static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -336,13 +343,13 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
                goto out;
 
        for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
-               struct net_device *dev = sprt->rt6i_dev;
+               struct net_device *dev = sprt->dst.dev;
 
                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
-                               if (sprt->rt6i_idev == NULL ||
+                               if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (flags & RT6_LOOKUP_F_IFACE && oif)
                                                continue;
@@ -383,7 +390,7 @@ static void rt6_probe(struct rt6_info *rt)
         * to no more than one per minute.
         */
        rcu_read_lock();
-       neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+       neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
                goto out;
        read_lock_bh(&neigh->lock);
@@ -397,7 +404,7 @@ static void rt6_probe(struct rt6_info *rt)
 
                target = (struct in6_addr *)&neigh->primary_key;
                addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
+               ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
        } else {
                read_unlock_bh(&neigh->lock);
        }
@@ -415,7 +422,7 @@ static inline void rt6_probe(struct rt6_info *rt)
  */
 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 {
-       struct net_device *dev = rt->rt6i_dev;
+       struct net_device *dev = rt->dst.dev;
        if (!oif || dev->ifindex == oif)
                return 2;
        if ((dev->flags & IFF_LOOPBACK) &&
@@ -430,7 +437,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
        int m;
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(&rt->dst);
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                m = 1;
@@ -516,9 +523,6 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
        struct rt6_info *match, *rt0;
        struct net *net;
 
-       RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
-                 __func__, fn->leaf, oif);
-
        rt0 = fn->rr_ptr;
        if (!rt0)
                fn->rr_ptr = rt0 = fn->leaf;
@@ -537,10 +541,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
                        fn->rr_ptr = next;
        }
 
-       RT6_TRACE("%s() => %p\n",
-                 __func__, match);
-
-       net = dev_net(rt0->rt6i_dev);
+       net = dev_net(rt0->dst.dev);
        return match ? match : net->ipv6.ip6_null_entry;
 }
 
@@ -609,7 +610,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                if (!addrconf_finite_timeout(lifetime)) {
                        rt->rt6i_flags &= ~RTF_EXPIRES;
                } else {
-                       rt->rt6i_expires = jiffies + HZ * lifetime;
+                       rt->dst.expires = jiffies + HZ * lifetime;
                        rt->rt6i_flags |= RTF_EXPIRES;
                }
                dst_release(&rt->dst);
@@ -634,7 +635,7 @@ do { \
                                goto restart; \
                } \
        } \
-} while(0)
+} while (0)
 
 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
@@ -656,6 +657,13 @@ out:
 
 }
 
+struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+                                   int flags)
+{
+       return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
+}
+EXPORT_SYMBOL_GPL(ip6_route_lookup);
+
 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif, int strict)
 {
@@ -704,7 +712,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
 int ip6_ins_rt(struct rt6_info *rt)
 {
        struct nl_info info = {
-               .nl_net = dev_net(rt->rt6i_dev),
+               .nl_net = dev_net(rt->dst.dev),
        };
        return __ip6_ins_rt(rt, &info);
 }
@@ -722,29 +730,27 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
        rt = ip6_rt_copy(ort, daddr);
 
        if (rt) {
-               struct neighbour *neigh;
                int attempts = !in_softirq();
 
-               if (!(rt->rt6i_flags&RTF_GATEWAY)) {
-                       if (rt->rt6i_dst.plen != 128 &&
+               if (!(rt->rt6i_flags & RTF_GATEWAY)) {
+                       if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
-                       ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+                       rt->rt6i_gateway = *daddr;
                }
 
                rt->rt6i_flags |= RTF_CACHE;
 
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
-                       ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+                       rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
 #endif
 
        retry:
-               neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-               if (IS_ERR(neigh)) {
-                       struct net *net = dev_net(rt->rt6i_dev);
+               if (rt6_bind_neighbour(rt, rt->dst.dev)) {
+                       struct net *net = dev_net(rt->dst.dev);
                        int saved_rt_min_interval =
                                net->ipv6.sysctl.ip6_rt_gc_min_interval;
                        int saved_rt_elasticity =
@@ -769,8 +775,6 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                        dst_free(&rt->dst);
                        return NULL;
                }
-               dst_set_neighbour(&rt->dst, neigh);
-
        }
 
        return rt;
@@ -783,7 +787,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 
        if (rt) {
                rt->rt6i_flags |= RTF_CACHE;
-               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
+               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
        }
        return rt;
 }
@@ -817,7 +821,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -873,7 +877,7 @@ void ip6_route_input(struct sk_buff *skb)
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
-               .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
+               .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };
@@ -930,9 +934,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
                rt->rt6i_idev = ort->rt6i_idev;
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
-               rt->rt6i_expires = 0;
+               rt->dst.expires = 0;
 
-               ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+               rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
                rt->rt6i_metric = 0;
 
@@ -995,7 +999,7 @@ static void ip6_link_failure(struct sk_buff *skb)
 
        rt = (struct rt6_info *) skb_dst(skb);
        if (rt) {
-               if (rt->rt6i_flags&RTF_CACHE) {
+               if (rt->rt6i_flags & RTF_CACHE) {
                        dst_set_expires(&rt->dst, 0);
                        rt->rt6i_flags |= RTF_EXPIRES;
                } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
@@ -1041,10 +1045,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
        return mtu;
 }
 
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = IPV6_MIN_MTU;
        struct inet6_dev *idev;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu)
+               return mtu;
+
+       mtu = IPV6_MIN_MTU;
 
        rcu_read_lock();
        idev = __in6_dev_get(dst->dev);
@@ -1060,34 +1069,38 @@ static DEFINE_SPINLOCK(icmp6_dst_lock);
 
 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                  struct neighbour *neigh,
-                                 const struct in6_addr *addr)
+                                 struct flowi6 *fl6)
 {
+       struct dst_entry *dst;
        struct rt6_info *rt;
        struct inet6_dev *idev = in6_dev_get(dev);
        struct net *net = dev_net(dev);
 
-       if (unlikely(idev == NULL))
+       if (unlikely(!idev))
                return NULL;
 
        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
-       if (unlikely(rt == NULL)) {
+       if (unlikely(!rt)) {
                in6_dev_put(idev);
+               dst = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        if (neigh)
                neigh_hold(neigh);
        else {
-               neigh = ndisc_get_neigh(dev, addr);
-               if (IS_ERR(neigh))
-                       neigh = NULL;
+               neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
+               if (IS_ERR(neigh)) {
+                       dst_free(&rt->dst);
+                       return ERR_CAST(neigh);
+               }
        }
 
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1099,8 +1112,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 
        fib6_force_start_gc(net);
 
+       dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
+
 out:
-       return &rt->dst;
+       return dst;
 }
 
 int icmp6_dst_gc(void)
@@ -1230,21 +1245,30 @@ int ip6_route_add(struct fib6_config *cfg)
        if (cfg->fc_metric == 0)
                cfg->fc_metric = IP6_RT_PRIO_USER;
 
-       table = fib6_new_table(net, cfg->fc_table);
-       if (table == NULL) {
-               err = -ENOBUFS;
-               goto out;
+       err = -ENOBUFS;
+       if (cfg->fc_nlinfo.nlh &&
+           !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
+               table = fib6_get_table(net, cfg->fc_table);
+               if (!table) {
+                       printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+                       table = fib6_new_table(net, cfg->fc_table);
+               }
+       } else {
+               table = fib6_new_table(net, cfg->fc_table);
        }
 
+       if (!table)
+               goto out;
+
        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
-       if (rt == NULL) {
+       if (!rt) {
                err = -ENOMEM;
                goto out;
        }
 
        rt->dst.obsolete = -1;
-       rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
+       rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
                                jiffies + clock_t_to_jiffies(cfg->fc_expires) :
                                0;
 
@@ -1287,8 +1311,9 @@ int ip6_route_add(struct fib6_config *cfg)
           they would result in kernel looping; promote them to reject routes
         */
        if ((cfg->fc_flags & RTF_REJECT) ||
-           (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
-                                             && !(cfg->fc_flags&RTF_LOCAL))) {
+           (dev && (dev->flags & IFF_LOOPBACK) &&
+            !(addr_type & IPV6_ADDR_LOOPBACK) &&
+            !(cfg->fc_flags & RTF_LOCAL))) {
                /* hold loopback dev/idev if we haven't done so. */
                if (dev != net->loopback_dev) {
                        if (dev) {
@@ -1315,7 +1340,7 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
-               ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
+               rt->rt6i_gateway = *gw_addr;
                gwa_type = ipv6_addr_type(gw_addr);
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -1329,26 +1354,26 @@ int ip6_route_add(struct fib6_config *cfg)
                           some exceptions. --ANK
                         */
                        err = -EINVAL;
-                       if (!(gwa_type&IPV6_ADDR_UNICAST))
+                       if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
                        grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
 
                        err = -EHOSTUNREACH;
-                       if (grt == NULL)
+                       if (!grt)
                                goto out;
                        if (dev) {
-                               if (dev != grt->rt6i_dev) {
+                               if (dev != grt->dst.dev) {
                                        dst_release(&grt->dst);
                                        goto out;
                                }
                        } else {
-                               dev = grt->rt6i_dev;
+                               dev = grt->dst.dev;
                                idev = grt->rt6i_idev;
                                dev_hold(dev);
                                in6_dev_hold(grt->rt6i_idev);
                        }
-                       if (!(grt->rt6i_flags&RTF_GATEWAY))
+                       if (!(grt->rt6i_flags & RTF_GATEWAY))
                                err = 0;
                        dst_release(&grt->dst);
 
@@ -1356,12 +1381,12 @@ int ip6_route_add(struct fib6_config *cfg)
                                goto out;
                }
                err = -EINVAL;
-               if (dev == NULL || (dev->flags&IFF_LOOPBACK))
+               if (!dev || (dev->flags & IFF_LOOPBACK))
                        goto out;
        }
 
        err = -ENODEV;
-       if (dev == NULL)
+       if (!dev)
                goto out;
 
        if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
@@ -1369,18 +1394,15 @@ int ip6_route_add(struct fib6_config *cfg)
                        err = -EINVAL;
                        goto out;
                }
-               ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+               rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
                rt->rt6i_prefsrc.plen = 128;
        } else
                rt->rt6i_prefsrc.plen = 0;
 
        if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
-               struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
-               if (IS_ERR(n)) {
-                       err = PTR_ERR(n);
+               err = rt6_bind_neighbour(rt, dev);
+               if (err)
                        goto out;
-               }
-               dst_set_neighbour(&rt->dst, n);
        }
 
        rt->rt6i_flags = cfg->fc_flags;
@@ -1426,7 +1448,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
 {
        int err;
        struct fib6_table *table;
-       struct net *net = dev_net(rt->rt6i_dev);
+       struct net *net = dev_net(rt->dst.dev);
 
        if (rt == net->ipv6.ip6_null_entry)
                return -ENOENT;
@@ -1445,7 +1467,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
 int ip6_del_rt(struct rt6_info *rt)
 {
        struct nl_info info = {
-               .nl_net = dev_net(rt->rt6i_dev),
+               .nl_net = dev_net(rt->dst.dev),
        };
        return __ip6_del_rt(rt, &info);
 }
@@ -1458,7 +1480,7 @@ static int ip6_route_del(struct fib6_config *cfg)
        int err = -ESRCH;
 
        table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
-       if (table == NULL)
+       if (!table)
                return err;
 
        read_lock_bh(&table->tb6_lock);
@@ -1470,8 +1492,8 @@ static int ip6_route_del(struct fib6_config *cfg)
        if (fn) {
                for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
                        if (cfg->fc_ifindex &&
-                           (rt->rt6i_dev == NULL ||
-                            rt->rt6i_dev->ifindex != cfg->fc_ifindex))
+                           (!rt->dst.dev ||
+                            rt->dst.dev->ifindex != cfg->fc_ifindex))
                                continue;
                        if (cfg->fc_flags & RTF_GATEWAY &&
                            !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
@@ -1533,7 +1555,7 @@ restart:
                        continue;
                if (!(rt->rt6i_flags & RTF_GATEWAY))
                        continue;
-               if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
+               if (fl6->flowi6_oif != rt->dst.dev->ifindex)
                        continue;
                if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
                        continue;
@@ -1566,7 +1588,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
                },
        };
 
-       ipv6_addr_copy(&rdfl.gateway, gateway);
+       rdfl.gateway = *gateway;
 
        if (rt6_need_strict(dest))
                flags |= RT6_LOOKUP_F_IFACE;
@@ -1611,18 +1633,18 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        dst_confirm(&rt->dst);
 
        /* Duplicate redirect: silently ignore. */
-       if (neigh == dst_get_neighbour_raw(&rt->dst))
+       if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
                goto out;
 
        nrt = ip6_rt_copy(rt, dest);
-       if (nrt == NULL)
+       if (!nrt)
                goto out;
 
        nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-       ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
+       nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
        dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
        if (ip6_ins_rt(nrt))
@@ -1632,7 +1654,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        netevent.new = &nrt->dst;
        call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
-       if (rt->rt6i_flags&RTF_CACHE) {
+       if (rt->rt6i_flags & RTF_CACHE) {
                ip6_del_rt(rt);
                return;
        }
@@ -1653,7 +1675,7 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr
        int allfrag = 0;
 again:
        rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
-       if (rt == NULL)
+       if (!rt)
                return;
 
        if (rt6_check_expired(rt)) {
@@ -1703,7 +1725,7 @@ again:
           1. It is connected route. Action: COW
           2. It is gatewayed route or NONEXTHOP route. Action: clone it.
         */
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, daddr, saddr);
        else
                nrt = rt6_alloc_clone(rt, daddr);
@@ -1759,7 +1781,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                    const struct in6_addr *dest)
 {
-       struct net *net = dev_net(ort->rt6i_dev);
+       struct net *net = dev_net(ort->dst.dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
                                            ort->dst.dev, 0);
 
@@ -1768,7 +1790,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                rt->dst.output = ort->dst.output;
                rt->dst.flags |= DST_HOST;
 
-               ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+               rt->rt6i_dst.addr = *dest;
                rt->rt6i_dst.plen = 128;
                dst_copy_metrics(&rt->dst, &ort->dst);
                rt->dst.error = ort->dst.error;
@@ -1776,9 +1798,9 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
                rt->dst.lastuse = jiffies;
-               rt->rt6i_expires = 0;
+               rt->dst.expires = 0;
 
-               ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+               rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
                rt->rt6i_metric = 0;
 
@@ -1801,7 +1823,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
        struct fib6_table *table;
 
        table = fib6_get_table(net, RT6_TABLE_INFO);
-       if (table == NULL)
+       if (!table)
                return NULL;
 
        write_lock_bh(&table->tb6_lock);
@@ -1810,7 +1832,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                goto out;
 
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_dev->ifindex != ifindex)
+               if (rt->dst.dev->ifindex != ifindex)
                        continue;
                if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
                        continue;
@@ -1841,8 +1863,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_nlinfo.nl_net = net,
        };
 
-       ipv6_addr_copy(&cfg.fc_dst, prefix);
-       ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+       cfg.fc_dst = *prefix;
+       cfg.fc_gateway = *gwaddr;
 
        /* We should treat it as a default route if prefix length is 0. */
        if (!prefixlen)
@@ -1860,12 +1882,12 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        struct fib6_table *table;
 
        table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
-       if (table == NULL)
+       if (!table)
                return NULL;
 
        write_lock_bh(&table->tb6_lock);
        for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
-               if (dev == rt->rt6i_dev &&
+               if (dev == rt->dst.dev &&
                    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
                    ipv6_addr_equal(&rt->rt6i_gateway, addr))
                        break;
@@ -1891,7 +1913,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_nlinfo.nl_net = dev_net(dev),
        };
 
-       ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+       cfg.fc_gateway = *gwaddr;
 
        ip6_route_add(&cfg);
 
@@ -1905,7 +1927,7 @@ void rt6_purge_dflt_routers(struct net *net)
 
        /* NOTE: Keep consistent with rt6_get_dflt_router */
        table = fib6_get_table(net, RT6_TABLE_DFLT);
-       if (table == NULL)
+       if (!table)
                return;
 
 restart:
@@ -1937,9 +1959,9 @@ static void rtmsg_to_fib6_config(struct net *net,
 
        cfg->fc_nlinfo.nl_net = net;
 
-       ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
-       ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
-       ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
+       cfg->fc_dst = rtmsg->rtmsg_dst;
+       cfg->fc_src = rtmsg->rtmsg_src;
+       cfg->fc_gateway = rtmsg->rtmsg_gateway;
 }
 
 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -2038,14 +2060,14 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
 
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr,
-                                   int anycast)
+                                   bool anycast)
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
                                            net->loopback_dev, 0);
-       struct neighbour *neigh;
+       int err;
 
-       if (rt == NULL) {
+       if (!rt) {
                if (net_ratelimit())
                        pr_warning("IPv6:  Maximum number of routes reached,"
                                   " consider increasing route/max_size.\n");
@@ -2065,15 +2087,13 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                rt->rt6i_flags |= RTF_ANYCAST;
        else
                rt->rt6i_flags |= RTF_LOCAL;
-       neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-       if (IS_ERR(neigh)) {
+       err = rt6_bind_neighbour(rt, rt->dst.dev);
+       if (err) {
                dst_free(&rt->dst);
-
-               return ERR_CAST(neigh);
+               return ERR_PTR(err);
        }
-       dst_set_neighbour(&rt->dst, neigh);
 
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
 
@@ -2091,7 +2111,7 @@ int ip6_route_get_saddr(struct net *net,
        struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
        int err = 0;
        if (rt->rt6i_prefsrc.plen)
-               ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+               *saddr = rt->rt6i_prefsrc.addr;
        else
                err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
                                         daddr, prefs, saddr);
@@ -2111,7 +2131,7 @@ static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
        struct net *net = ((struct arg_dev_net_ip *)arg)->net;
        struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
 
-       if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
+       if (((void *)rt->dst.dev == dev || !dev) &&
            rt != net->ipv6.ip6_null_entry &&
            ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
                /* remove prefsrc entry */
@@ -2141,11 +2161,10 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
        const struct arg_dev_net *adn = arg;
        const struct net_device *dev = adn->dev;
 
-       if ((rt->rt6i_dev == dev || dev == NULL) &&
-           rt != adn->net->ipv6.ip6_null_entry) {
-               RT6_TRACE("deleted by ifdown %p\n", rt);
+       if ((rt->dst.dev == dev || !dev) &&
+           rt != adn->net->ipv6.ip6_null_entry)
                return -1;
-       }
+
        return 0;
 }
 
@@ -2178,7 +2197,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
        */
 
        idev = __in6_dev_get(arg->dev);
-       if (idev == NULL)
+       if (!idev)
                return 0;
 
        /* For administrative MTU increase, there is no way to discover
@@ -2195,7 +2214,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
           also have the lowest MTU, TOO BIG MESSAGE will be lead to
           PMTU discouvery.
         */
-       if (rt->rt6i_dev == arg->dev &&
+       if (rt->dst.dev == arg->dev &&
            !dst_metric_locked(&rt->dst, RTAX_MTU) &&
            (dst_mtu(&rt->dst) >= arg->mtu ||
             (dst_mtu(&rt->dst) < arg->mtu &&
@@ -2344,11 +2363,13 @@ static int rt6_fill_node(struct net *net,
                         int iif, int type, u32 pid, u32 seq,
                         int prefix, int nowait, unsigned int flags)
 {
+       const struct inet_peer *peer;
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
        long expires;
        u32 table;
        struct neighbour *n;
+       u32 ts, tsage;
 
        if (prefix) {   /* user wants prefix routes only */
                if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2358,7 +2379,7 @@ static int rt6_fill_node(struct net *net,
        }
 
        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -2372,25 +2393,25 @@ static int rt6_fill_node(struct net *net,
                table = RT6_TABLE_UNSPEC;
        rtm->rtm_table = table;
        NLA_PUT_U32(skb, RTA_TABLE, table);
-       if (rt->rt6i_flags&RTF_REJECT)
+       if (rt->rt6i_flags & RTF_REJECT)
                rtm->rtm_type = RTN_UNREACHABLE;
-       else if (rt->rt6i_flags&RTF_LOCAL)
+       else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
-       else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
+       else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
-       if (rt->rt6i_flags&RTF_DYNAMIC)
+       if (rt->rt6i_flags & RTF_DYNAMIC)
                rtm->rtm_protocol = RTPROT_REDIRECT;
        else if (rt->rt6i_flags & RTF_ADDRCONF)
                rtm->rtm_protocol = RTPROT_KERNEL;
-       else if (rt->rt6i_flags&RTF_DEFAULT)
+       else if (rt->rt6i_flags & RTF_DEFAULT)
                rtm->rtm_protocol = RTPROT_RA;
 
-       if (rt->rt6i_flags&RTF_CACHE)
+       if (rt->rt6i_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dst) {
@@ -2430,7 +2451,7 @@ static int rt6_fill_node(struct net *net,
 
        if (rt->rt6i_prefsrc.plen) {
                struct in6_addr saddr_buf;
-               ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+               saddr_buf = rt->rt6i_prefsrc.addr;
                NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
        }
 
@@ -2438,24 +2459,31 @@ static int rt6_fill_node(struct net *net,
                goto nla_put_failure;
 
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n)
                NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
        rcu_read_unlock();
 
        if (rt->dst.dev)
-               NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
+               NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
 
        NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
 
        if (!(rt->rt6i_flags & RTF_EXPIRES))
                expires = 0;
-       else if (rt->rt6i_expires - jiffies < INT_MAX)
-               expires = rt->rt6i_expires - jiffies;
+       else if (rt->dst.expires - jiffies < INT_MAX)
+               expires = rt->dst.expires - jiffies;
        else
                expires = INT_MAX;
 
-       if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+       peer = rt->rt6i_peer;
+       ts = tsage = 0;
+       if (peer && peer->tcp_ts_stamp) {
+               ts = peer->tcp_ts;
+               tsage = get_seconds() - peer->tcp_ts_stamp;
+       }
+
+       if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
                               expires, rt->dst.error) < 0)
                goto nla_put_failure;
 
@@ -2504,14 +2532,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
+               fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
        }
 
        if (tb[RTA_DST]) {
                if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
+               fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
        }
 
        if (tb[RTA_IIF])
@@ -2530,7 +2558,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        }
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (skb == NULL) {
+       if (!skb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -2565,10 +2593,10 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
        int err;
 
        err = -ENOBUFS;
-       seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
+       seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
        skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
@@ -2635,7 +2663,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n) {
                seq_printf(m, "%pi6", n->primary_key);
        } else {
@@ -2645,14 +2673,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
        seq_printf(m, " %08x %08x %08x %08x %8s\n",
                   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
                   rt->dst.__use, rt->rt6i_flags,
-                  rt->rt6i_dev ? rt->rt6i_dev->name : "");
+                  rt->dst.dev ? rt->dst.dev->name : "");
        return 0;
 }
 
 static int ipv6_route_show(struct seq_file *m, void *v)
 {
        struct net *net = (struct net *)m->private;
-       fib6_clean_all(net, rt6_info_route, 0, m);
+       fib6_clean_all_ro(net, rt6_info_route, 0, m);
        return 0;
 }
 
index a7a18602a046e1ffe5f0f00883844459802f4a25..3b6dac956bb09e74cd9119b7763b786b02789743 100644 (file)
@@ -91,7 +91,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
 {
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, nt);
@@ -680,7 +682,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -705,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -914,7 +916,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                goto done;
 #ifdef CONFIG_IPV6_SIT_6RD
                } else {
-                       ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
+                       ip6rd.prefix = t->ip6rd.prefix;
                        ip6rd.relay_prefix = t->ip6rd.relay_prefix;
                        ip6rd.prefixlen = t->ip6rd.prefixlen;
                        ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
@@ -1082,7 +1084,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (relay_prefix != ip6rd.relay_prefix)
                                goto done;
 
-                       ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
+                       t->ip6rd.prefix = prefix;
                        t->ip6rd.relay_prefix = relay_prefix;
                        t->ip6rd.prefixlen = ip6rd.prefixlen;
                        t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
 static int __net_init sit_init_net(struct net *net)
 {
        struct sit_net *sitn = net_generic(net, sit_net_id);
+       struct ip_tunnel *t;
        int err;
 
        sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
        if ((err = register_netdev(sitn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(sitn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 5a0d6648bbbc79107baca7a39358463ab31d02ea..8e951d8d3b814e6eefc2583ee054f5a85b75ba91 100644 (file)
@@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        req->mss = mss;
        ireq->rmt_port = th->source;
        ireq->loc_port = th->dest;
-       ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                struct flowi6 fl6;
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_TCP;
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.saddr = ireq6->loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
index 36131d122a6f3f9007776ff343a9197bd0430b0a..906c7ca43542e020c53759ec554f952f20b9d1e4 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -153,7 +154,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                        fl6_sock_release(flowlabel);
                }
        }
@@ -195,7 +196,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                tp->write_seq = 0;
        }
 
-       ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+       np->daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -244,9 +245,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                saddr = &np->rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr,
-                      (saddr ? saddr : &np->saddr));
+       fl6.daddr = np->daddr;
+       fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
@@ -264,11 +264,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               ipv6_addr_copy(&np->rcv_saddr, saddr);
+               np->rcv_saddr = *saddr;
        }
 
        /* set the source address */
-       ipv6_addr_copy(&np->saddr, saddr);
+       np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        sk->sk_gso_type = SKB_GSO_TCPV6;
@@ -398,8 +398,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                         */
                        memset(&fl6, 0, sizeof(fl6));
                        fl6.flowi6_proto = IPPROTO_TCP;
-                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.daddr = np->daddr;
+                       fl6.saddr = np->saddr;
                        fl6.flowi6_oif = sk->sk_bound_dev_if;
                        fl6.flowi6_mark = sk->sk_mark;
                        fl6.fl6_dport = inet->inet_dport;
@@ -489,8 +489,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.daddr = treq->rmt_addr;
+       fl6.saddr = treq->loc_addr;
        fl6.flowlabel = 0;
        fl6.flowi6_oif = treq->iif;
        fl6.flowi6_mark = sk->sk_mark;
@@ -512,7 +512,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-               ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+               fl6.daddr = treq->rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -617,8 +617,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                        tp->md5sig_info->alloced6++;
                }
 
-               ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
-                              peer);
+               tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 
@@ -750,8 +749,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 
        bp = &hp->md5_blk.ip6;
        /* 1. TCP pseudo-header (RFC2460) */
-       ipv6_addr_copy(&bp->saddr, saddr);
-       ipv6_addr_copy(&bp->daddr, daddr);
+       bp->saddr = *saddr;
+       bp->daddr = *daddr;
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);
 
@@ -1039,8 +1038,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 #endif
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->saddr;
+       fl6.saddr = ipv6_hdr(skb)->daddr;
 
        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;
@@ -1250,11 +1249,18 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_openreq_init(req, &tmp_opt, skb);
 
        treq = inet6_rsk(req);
-       ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+       treq->rmt_addr = ipv6_hdr(skb)->saddr;
+       treq->loc_addr = ipv6_hdr(skb)->daddr;
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));
 
+       treq->iif = sk->sk_bound_dev_if;
+
+       /* So that link locals have meaning */
+       if (!sk->sk_bound_dev_if &&
+           ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               treq->iif = inet6_iif(skb);
+
        if (!isn) {
                struct inet_peer *peer = NULL;
 
@@ -1264,12 +1270,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }
-               treq->iif = sk->sk_bound_dev_if;
-
-               /* So that link locals have meaning */
-               if (!sk->sk_bound_dev_if &&
-                   ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                       treq->iif = inet6_iif(skb);
 
                if (want_cookie) {
                        isn = cookie_v6_init_sequence(sk, skb, &req->mss);
@@ -1380,7 +1380,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+               newnp->rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1444,9 +1444,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
-       ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
-       ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+       newnp->daddr = treq->rmt_addr;
+       newnp->saddr = treq->loc_addr;
+       newnp->rcv_saddr = treq->loc_addr;
        newsk->sk_bound_dev_if = treq->iif;
 
        /* Now IPv6 options...
@@ -1995,7 +1995,8 @@ static int tcp_v6_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -2214,7 +2215,6 @@ struct proto tcpv6_prot = {
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2228,6 +2228,9 @@ struct proto tcpv6_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index 846f4757eb8d46394a604595be0698d485ae1ab0..4f96b5c636856eef46c0ec3a0dd990ae997ee7b0 100644 (file)
@@ -238,7 +238,7 @@ exact_match:
        return result;
 }
 
-static struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(struct net *net,
                                      const struct in6_addr *saddr, __be16 sport,
                                      const struct in6_addr *daddr, __be16 dport,
                                      int dif, struct udp_table *udptable)
@@ -305,6 +305,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 
 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
                                          __be16 sport, __be16 dport,
@@ -340,7 +341,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +364,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +378,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov,len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                if (err == -EINVAL)
@@ -417,8 +419,7 @@ try_again:
                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
                                               &sin6->sin6_addr);
                else {
-                       ipv6_addr_copy(&sin6->sin6_addr,
-                                      &ipv6_hdr(skb)->saddr);
+                       sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin6->sin6_scope_id = IP6CB(skb)->iif;
                }
@@ -432,7 +433,7 @@ try_again:
                        datagram_recv_ctl(sk, msg, skb);
        }
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
@@ -538,7 +539,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
+       skb_dst_drop(skb);
+       rc = sock_queue_rcv_skb(sk, skb);
+       if (rc < 0) {
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP6_INC_STATS_BH(sock_net(sk),
@@ -1113,11 +1116,11 @@ do_udp_sendmsg:
 
        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl6.daddr, daddr);
+               fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.saddr = np->saddr;
        fl6.fl6_sport = inet->inet_sport;
 
        final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1298,7 +1301,8 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
index 3437d7d4eed6dcff351adcd7645dffd919c3477e..a81ce9450750f87a207499e888814a76009c0286 100644 (file)
@@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
                top_iph->nexthdr = IPPROTO_BEETPH;
        }
 
-       ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
-       ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+       top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+       top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
 }
 
@@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
        ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
-       ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
-       ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
+       ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
+       ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
        err = 0;
 out:
        return err;
index 4d6edff0498f6a1e55e2301aa958d0fde5556baa..261e6e6f487e818eba1e670bace7f031efd60631 100644 (file)
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
                dsfield &= ~INET_ECN_MASK;
        ipv6_change_dsfield(top_iph, 0, dsfield);
        top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
-       ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
-       ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
+       top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+       top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
 }
 
index faae41737fca3ddc77c0e125ed854ae1b580d609..4eeff89c1aaa7d44d8b6369583b0b9e1c15317e9 100644 (file)
@@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
        struct sock *sk = skb->sk;
 
        fl6.flowi6_oif = sk->sk_bound_dev_if;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->daddr;
 
        ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
@@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
        struct sock *sk = skb->sk;
 
        fl6.fl6_dport = inet_sk(sk)->inet_dport;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->daddr;
 
        ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
index d879f7efbd10b676be5887ab6460cfb8bee881d9..8ea65e032733731d15b7579a1047f56d307cbbe3 100644 (file)
@@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
        memset(fl6, 0, sizeof(struct flowi6));
        fl6->flowi6_mark = skb->mark;
 
-       ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
-       ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
+       fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+       fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
 
        while (nh + offset + 1 < skb->data ||
               pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
index f2d72b8a3faa14e0cf8f7c179d86f4d33e88b961..3f2f7c4ab7210d5948c22a6e417bc0046b1631b0 100644 (file)
@@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 
        /* Initialize temporary selector matching only
         * to current session. */
-       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
-       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+       *(struct in6_addr *)&sel->daddr = fl6->daddr;
+       *(struct in6_addr *)&sel->saddr = fl6->saddr;
        sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
        sel->dport_mask = htons(0xffff);
        sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
index c24f25ab67d324f64f67126ed8f27639d5988a34..bb14c34776801e7dd43ce13c8552794d89a5c197 100644 (file)
@@ -2558,8 +2558,8 @@ bed:
                        self->errno = 0;
                        setup_timer(&self->watchdog, irda_discovery_timeout,
                                        (unsigned long)self);
-                       self->watchdog.expires = jiffies + (val * HZ/1000);
-                       add_timer(&(self->watchdog));
+                       mod_timer(&self->watchdog,
+                                 jiffies + msecs_to_jiffies(val));
 
                        /* Wait for IR-LMP to call us back */
                        __wait_event_interruptible(self->query_wait,
index 779117636270965162e3d1d8a834788cd6aa6fc8..579617cca125e014cd93506e5d419c8c94f8aff1 100644 (file)
@@ -67,7 +67,7 @@ static void *ckey;
 static void *skey;
 
 /* Module parameters */
-static int eth;   /* Use "eth" or "irlan" name for devices */
+static bool eth;   /* Use "eth" or "irlan" name for devices */
 static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
 
 #ifdef CONFIG_PROC_FS
index 32e3bb0261105f51ccc24e21026a54e90829e855..5c93f2952b082b6fa5ef6c0bc8211dfdb68f3693 100644 (file)
@@ -1461,14 +1461,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
        }
 
        /* Allocate a new instance */
-       new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+       new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
        if (!new) {
                IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
                spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
                return NULL;
        }
-       /* Dup */
-       memcpy(new, orig, sizeof(struct tsap_cb));
        spin_lock_init(&new->lock);
 
        /* We don't need the old instance any more */
index 274d150320c0f52b0b88fbc5b1c1817bd45cfc24..d5c5b8fd1d01e50f2c9a928809184b28e77ea4bd 100644 (file)
@@ -130,6 +130,17 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
        memcpy(&dst[8], src, 8);
 }
 
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               if (skb->dev)
+                       dev_put(skb->dev);
+               kfree_skb(skb);
+       }
+}
+
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -164,10 +175,9 @@ static int afiucv_pm_freeze(struct device *dev)
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, node, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
-               skb_queue_purge(&iucv->send_skb_q);
+               iucv_skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                switch (sk->sk_state) {
-               case IUCV_SEVERED:
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
@@ -212,7 +222,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
                        sk->sk_state_change(sk);
                        break;
                case IUCV_DISCONN:
-               case IUCV_SEVERED:
                case IUCV_CLOSING:
                case IUCV_LISTEN:
                case IUCV_BOUND:
@@ -366,9 +375,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-       rcu_read_lock();
-       skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
-       rcu_read_unlock();
+       skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
        if (!skb->dev)
                return -ENODEV;
        if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +395,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        err = dev_queue_xmit(skb);
        if (err) {
                skb_unlink(nskb, &iucv->send_skb_q);
+               dev_put(nskb->dev);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -396,25 +404,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        return err;
 }
 
-/* Timers */
-static void iucv_sock_timeout(unsigned long arg)
-{
-       struct sock *sk = (struct sock *)arg;
-
-       bh_lock_sock(sk);
-       sk->sk_err = ETIMEDOUT;
-       sk->sk_state_change(sk);
-       bh_unlock_sock(sk);
-
-       iucv_sock_kill(sk);
-       sock_put(sk);
-}
-
-static void iucv_sock_clear_timer(struct sock *sk)
-{
-       sk_stop_timer(sk, &sk->sk_timer);
-}
-
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
        struct sock *sk;
@@ -467,7 +456,6 @@ static void iucv_sock_close(struct sock *sk)
        int err, blen;
        struct sk_buff *skb;
 
-       iucv_sock_clear_timer(sk);
        lock_sock(sk);
 
        switch (sk->sk_state) {
@@ -481,16 +469,14 @@ static void iucv_sock_close(struct sock *sk)
                        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                        skb = sock_alloc_send_skb(sk, blen, 1, &err);
                        if (skb) {
-                               skb_reserve(skb,
-                                       sizeof(struct af_iucv_trans_hdr) +
-                                       ETH_HLEN);
+                               skb_reserve(skb, blen);
                                err = afiucv_hs_send(NULL, sk, skb,
                                                     AF_IUCV_FLAG_FIN);
                        }
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
-       case IUCV_DISCONN:
+       case IUCV_DISCONN:   /* fall through */
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);
 
@@ -520,7 +506,7 @@ static void iucv_sock_close(struct sock *sk)
                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);
 
-               skb_queue_purge(&iucv->send_skb_q);
+               iucv_skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                break;
 
@@ -581,8 +567,6 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;
 
-       setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
-
        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
 }
@@ -675,16 +659,12 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
                }
 
                if (sk->sk_state == IUCV_CONNECTED ||
-                   sk->sk_state == IUCV_SEVERED ||
-                   sk->sk_state == IUCV_DISCONN ||     /* due to PM restore */
+                   sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
 
-                       if (sk->sk_state == IUCV_SEVERED)
-                               sk->sk_state = IUCV_DISCONN;
-
                        release_sock(sk);
                        return sk;
                }
@@ -739,7 +719,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_name, sa->siucv_name, 8);
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
-                       sock->sk->sk_bound_dev_if = dev->ifindex;
+                       sk->sk_bound_dev_if = dev->ifindex;
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
@@ -774,16 +754,13 @@ done:
 static int iucv_sock_autobind(struct sock *sk)
 {
        struct iucv_sock *iucv = iucv_sk(sk);
-       char query_buffer[80];
        char name[12];
        int err = 0;
 
-       /* Set the userid and name */
-       cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
-       if (unlikely(err))
+       if (unlikely(!pr_iucv))
                return -EPROTO;
 
-       memcpy(iucv->src_user_id, query_buffer, 8);
+       memcpy(iucv->src_user_id, iucv_userid, 8);
 
        write_lock_bh(&iucv_sk_list.lock);
 
@@ -1225,6 +1202,8 @@ release:
        return len;
 
 fail:
+       if (skb->dev)
+               dev_put(skb->dev);
        kfree_skb(skb);
 out:
        release_sock(sk);
@@ -1357,7 +1336,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        int blen;
        int err = 0;
 
-       if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+       if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
@@ -1441,9 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                                        ETH_HLEN;
                                sskb = sock_alloc_send_skb(sk, blen, 1, &err);
                                if (sskb) {
-                                       skb_reserve(sskb,
-                                               sizeof(struct af_iucv_trans_hdr)
-                                               + ETH_HLEN);
+                                       skb_reserve(sskb, blen);
                                        err = afiucv_hs_send(NULL, sk, sskb,
                                                             AF_IUCV_FLAG_WIN);
                                }
@@ -1506,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;
 
-       if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+       if (sk->sk_state == IUCV_DISCONN)
                mask |= POLLIN;
 
        if (sock_writeable(sk))
@@ -1533,7 +1510,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
        switch (sk->sk_state) {
        case IUCV_DISCONN:
        case IUCV_CLOSING:
-       case IUCV_SEVERED:
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;
@@ -1888,10 +1864,7 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
 {
        struct sock *sk = path->private;
 
-       if (!list_empty(&iucv_sk(sk)->accept_q))
-               sk->sk_state = IUCV_SEVERED;
-       else
-               sk->sk_state = IUCV_DISCONN;
+       sk->sk_state = IUCV_DISCONN;
 
        sk->sk_state_change(sk);
 }
@@ -2051,10 +2024,7 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
        /* other end of connection closed */
        if (iucv) {
                bh_lock_sock(sk);
-               if (!list_empty(&iucv->accept_q))
-                       sk->sk_state = IUCV_SEVERED;
-               else
-                       sk->sk_state = IUCV_DISCONN;
+               sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
                bh_unlock_sock(sk);
        }
@@ -2209,6 +2179,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                break;
        case 0:
                /* plain data frame */
+               memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
+                      CB_TRGCLS_LEN);
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
@@ -2259,6 +2231,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                        case TX_NOTIFY_OK:
                                __skb_unlink(this, list);
                                iucv_sock_wake_msglim(sk);
+                               dev_put(this->dev);
                                kfree_skb(this);
                                break;
                        case TX_NOTIFY_PENDING:
@@ -2269,6 +2242,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                                atomic_dec(&iucv->pendings);
                                if (atomic_read(&iucv->pendings) <= 0)
                                        iucv_sock_wake_msglim(sk);
+                               dev_put(this->dev);
                                kfree_skb(this);
                                break;
                        case TX_NOTIFY_UNREACHABLE:
@@ -2277,11 +2251,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                        case TX_NOTIFY_GENERALERROR:
                        case TX_NOTIFY_DELAYED_GENERALERROR:
                                __skb_unlink(this, list);
+                               dev_put(this->dev);
                                kfree_skb(this);
-                               if (!list_empty(&iucv->accept_q))
-                                       sk->sk_state = IUCV_SEVERED;
-                               else
-                                       sk->sk_state = IUCV_DISCONN;
+                               sk->sk_state = IUCV_DISCONN;
                                sk->sk_state_change(sk);
                                break;
                        }
@@ -2291,6 +2263,13 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
        }
        spin_unlock_irqrestore(&list->lock, flags);
 
+       if (sk->sk_state == IUCV_CLOSING) {
+               if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                       sk->sk_state = IUCV_CLOSED;
+                       sk->sk_state_change(sk);
+               }
+       }
+
 out_unlock:
        bh_unlock_sock(sk);
 }
index 1e733e9073d0014cf4cfc5e3a6800624159008c2..11dbb2255ccbce3f1e34c34f35a56a9ebd9428c1 100644 (file)
@@ -375,7 +375,7 @@ static int verify_address_len(const void *p)
        const struct sadb_address *sp = p;
        const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
        const struct sockaddr_in *sin;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        const struct sockaddr_in6 *sin6;
 #endif
        int len;
@@ -387,7 +387,7 @@ static int verify_address_len(const void *p)
                    sp->sadb_address_prefixlen > 32)
                        return -EINVAL;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
                if (sp->sadb_address_len != len ||
@@ -469,7 +469,7 @@ static int present_and_same_family(const struct sadb_address *src,
        if (s_addr->sa_family != d_addr->sa_family)
                return 0;
        if (s_addr->sa_family != AF_INET
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
            && s_addr->sa_family != AF_INET6
 #endif
                )
@@ -579,7 +579,7 @@ static inline int pfkey_sockaddr_len(sa_family_t family)
        switch (family) {
        case AF_INET:
                return sizeof(struct sockaddr_in);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return sizeof(struct sockaddr_in6);
 #endif
@@ -595,7 +595,7 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
                xaddr->a4 =
                        ((struct sockaddr_in *)sa)->sin_addr.s_addr;
                return AF_INET;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(xaddr->a6,
                       &((struct sockaddr_in6 *)sa)->sin6_addr,
@@ -639,7 +639,7 @@ static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
        case AF_INET:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
                break;
@@ -705,14 +705,14 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                return 32;
            }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
            {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = port;
                sin6->sin6_flowinfo = 0;
-               ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
+               sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
                sin6->sin6_scope_id = 0;
                return 128;
            }
@@ -1311,7 +1311,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
@@ -3146,7 +3146,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_IPSEC_POLICY) {
                        *dir = -EOPNOTSUPP;
index bf8d50c67931e8e588520b7c49ad49fca51b40b3..89ff8c67943e8af26efe1bac80b306e297921ebd 100644 (file)
@@ -756,9 +756,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
                goto error;
        }
 
-       /* Point to L2TP header */
-       optr = ptr = skb->data;
-
        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
@@ -769,12 +766,15 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 
                offset = 0;
                do {
-                       printk(" %02X", ptr[offset]);
+                       printk(" %02X", skb->data[offset]);
                } while (++offset < length);
 
                printk("\n");
        }
 
+       /* Point to L2TP header */
+       optr = ptr = skb->data;
+
        /* Get L2TP header flags */
        hdrflags = ntohs(*(__be16 *) ptr);
 
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
        /* Get routing info from the tunnel socket */
        skb_dst_drop(skb);
-       skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+       skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
 
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
index dfd3a648a55107bda2ff14adb6f9e91c06449240..a18e6c3d36e37e699089ed5e0910c857da073d1c 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non stream protcols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+                       sk_eat_skb(sk, skb, 0);
+                       *seq = 0;
+       }
+
        goto out;
 }
 
index aeda65466f3eb60a4dcb37cd8a2cae152ffe794c..502d3ecc4a797b4004128604e5324d0cfc198bfe 100644 (file)
@@ -318,7 +318,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                        rinfo[i].diff = i * pinfo->norm_offset;
        }
        for (i = 1; i < sband->n_bitrates; i++) {
-               s = 0;
+               s = false;
                for (j = 0; j < sband->n_bitrates - i; j++)
                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
@@ -327,7 +327,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                                rinfo[j + 1].index = tmp;
                                rinfo[rinfo[j].index].rev_index = j;
                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
-                               s = 1;
+                               s = true;
                        }
                if (!s)
                        break;
index 2908e56eaa9d9b329b3894d71c378088b9397b08..9270771702fe9a5a12fef9fcde4554f30b18b58d 100644 (file)
@@ -106,7 +106,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        /* save the ERP value so that it is available at association time */
        if (elems->erp_info && elems->erp_info_len >= 1) {
                bss->erp_value = elems->erp_info[0];
-               bss->has_erp_value = 1;
+               bss->has_erp_value = true;
        }
 
        if (elems->tim) {
index 8260b13d93c926cf9964d7a4f4e225be0123a68c..f8ac4ef0b7949827a49befea4fb1d6419b41fdde 100644 (file)
@@ -4,6 +4,14 @@ menu "Core Netfilter Configuration"
 config NETFILTER_NETLINK
        tristate
 
+config NETFILTER_NETLINK_ACCT
+       tristate "Netfilter NFACCT over NFNETLINK interface"
+       depends on NETFILTER_ADVANCED
+       select NETFILTER_NETLINK
+       help
+         If this option is enabled, the kernel will include support
+         for extended accounting via NFNETLINK.
+
 config NETFILTER_NETLINK_QUEUE
        tristate "Netfilter NFQUEUE over NFNETLINK interface"
        depends on NETFILTER_ADVANCED
@@ -75,6 +83,16 @@ config NF_CONNTRACK_ZONES
 
          If unsure, say `N'.
 
+config NF_CONNTRACK_PROCFS
+       bool "Supply CT list in procfs (OBSOLETE)"
+       default y
+       depends on PROC_FS
+       ---help---
+       This option enables for the list of known conntrack entries
+       to be shown in procfs under net/netfilter/nf_conntrack. This
+       is considered obsolete in favor of using the conntrack(8)
+       tool which uses Netlink.
+
 config NF_CONNTRACK_EVENTS
        bool "Connection tracking events"
        depends on NETFILTER_ADVANCED
@@ -201,7 +219,6 @@ config NF_CONNTRACK_BROADCAST
 
 config NF_CONNTRACK_NETBIOS_NS
        tristate "NetBIOS name service protocol support"
-       depends on NETFILTER_ADVANCED
        select NF_CONNTRACK_BROADCAST
        help
          NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +559,6 @@ config NETFILTER_XT_TARGET_NOTRACK
        tristate  '"NOTRACK" target support'
        depends on IP_NF_RAW || IP6_NF_RAW
        depends on NF_CONNTRACK
-       depends on NETFILTER_ADVANCED
        help
          The NOTRACK target allows a select rule to specify
          which packets *not* to enter the conntrack/NAT
@@ -772,6 +788,15 @@ config NETFILTER_XT_MATCH_DSCP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_ECN
+       tristate '"ecn" match support'
+       depends on NETFILTER_ADVANCED
+       ---help---
+       This option adds an "ECN" match, which allows you to match against
+       the IPv4 and TCP header ECN fields.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_MATCH_ESP
        tristate '"esp" match support'
        depends on NETFILTER_ADVANCED
@@ -881,6 +906,16 @@ config NETFILTER_XT_MATCH_MULTIPORT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_NFACCT
+       tristate '"nfacct" match support'
+       depends on NETFILTER_ADVANCED
+       select NETFILTER_NETLINK_ACCT
+       help
+         This option allows you to use the extended accounting through
+         nfnetlink_acct.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_OSF
        tristate '"osf" Passive OS fingerprint match'
        depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
index 1a02853df8638191106e8f9dece4b574399d441e..40f4c3d636c583d0deb95fa1af959e967c635c87 100644 (file)
@@ -7,6 +7,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 obj-$(CONFIG_NETFILTER) = netfilter.o
 
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
+obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 
@@ -80,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
index afca6c78948cfd77a1ad8b429f733482c4b11c1a..b4e8ff05b3014434242941165babe201822d9770 100644 (file)
@@ -54,6 +54,12 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 
 struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
+
+#if defined(CONFIG_JUMP_LABEL)
+struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+EXPORT_SYMBOL(nf_hooks_needed);
+#endif
+
 static DEFINE_MUTEX(nf_hook_mutex);
 
 int nf_register_hook(struct nf_hook_ops *reg)
@@ -70,6 +76,9 @@ int nf_register_hook(struct nf_hook_ops *reg)
        }
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
+#if defined(CONFIG_JUMP_LABEL)
+       jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
        return 0;
 }
 EXPORT_SYMBOL(nf_register_hook);
@@ -79,7 +88,9 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
-
+#if defined(CONFIG_JUMP_LABEL)
+       jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
        synchronize_net();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
@@ -218,7 +229,7 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
 }
 EXPORT_SYMBOL(skb_make_writable);
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
index 052579fe389ab774b0d7fa704d1c7b371fc96b8f..1f03556666f4196cf74e6e0b8433f27756c65ede 100644 (file)
@@ -109,16 +109,18 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 bool
 ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
                    __be16 *port, u8 *proto)
 {
        int protoff;
        u8 nexthdr;
+       __be16 frag_off;
 
        nexthdr = ipv6_hdr(skb)->nexthdr;
-       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+                                  &frag_off);
        if (protoff < 0)
                return false;
 
index f2d576e6b76972ad6f1a3d90127677dc41d253bf..4015fcaf87bc9556726e5a73b1ebaae1ab29595c 100644 (file)
@@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
 static inline void
 hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
 {
-       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->ip.in6 = src->ip.in6;
 }
 
 static inline void
index 6ee10f5d59bd8a4fde8ad54447c5552d11863303..37d667e3f6f82d82e442b66d4c38dd592cba06d3 100644 (file)
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index fb90e344e90709f205bef4c2a61b3855be1c7f56..e69e2718fbe162343eaf97defd30153155ee2ef1 100644 (file)
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index deb3e3dfa5fcb13ba9f125f0a9dbcc7042d0735a..64199b4e93c952e24ca8c5508af1d3788b1286f8 100644 (file)
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 ip2_from = 0, ip2_to, ip2_last, ip2;
        u32 timeout = h->timeout;
        bool with_ports = false;
index 60d016541c586b102f8e97686bbb49db8a9329eb..28988196775e67a39bcf954e004068d2de456db5 100644 (file)
@@ -267,7 +267,7 @@ static inline void
 hash_net6_data_copy(struct hash_net6_elem *dst,
                    const struct hash_net6_elem *src)
 {
-       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->ip.in6 = src->ip.in6;
        dst->cidr = src->cidr;
 }
 
index 70bd1d0774c6bdb6b1f9d4d1f07284cfe43500dd..af4c0b8c5275d76ebac9e581b104e6f1cefcf304 100644 (file)
@@ -232,6 +232,21 @@ config     IP_VS_NQ
          If you want to compile it in kernel, say Y. To compile it as a
          module, choose M here. If unsure, say N.
 
+comment 'IPVS SH scheduler'
+
+config IP_VS_SH_TAB_BITS
+       int "IPVS source hashing table size (the Nth power of 2)"
+       range 4 20
+       default 8
+       ---help---
+         The source hashing scheduler maps source IPs to destinations
+         stored in a hash table. This table is tiled by each destination
+         until all slots in the table are filled. When using weights to
+         allow destinations to receive more connections, the table is
+         tiled an amount proportional to the weights specified. The table
+         needs to be large enough to effectively fit all the destinations
+         multiplied by their respective weights.
+
 comment 'IPVS application helper'
 
 config IP_VS_FTP
index 12571fb2881c2c3670aab3a765f7a6e0cf3afcf7..29fa5badde757d6de0ac2fc0359cc27ba37ff83b 100644 (file)
@@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
        if ((cp) && (!cp->dest)) {
                dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
                                       cp->dport, &cp->vaddr, cp->vport,
-                                      cp->protocol, cp->fwmark);
+                                      cp->protocol, cp->fwmark, cp->flags);
                ip_vs_bind_dest(cp, dest);
                return dest;
        } else
index 093cc327020fba6f09a6b4bfcabfc9386d892d02..611c3359b94d9fcc6336bb456c97d4106f6c7892 100644 (file)
@@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
        if (!cp)
                return NF_ACCEPT;
 
-       ipv6_addr_copy(&snet.in6, &iph->saddr);
+       snet.in6 = iph->saddr;
        return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
                                    pp, offset, sizeof(struct ipv6hdr));
 }
index 008bf97cc91a58b14a0ef9fd6924eb6f71063f11..b3afe189af61880464ef6562c568016509957293 100644 (file)
@@ -85,7 +85,7 @@ static int __ip_vs_addr_is_local_v6(struct net *net,
        };
 
        rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
+       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
                return 1;
 
        return 0;
@@ -619,15 +619,21 @@ struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int af,
                                   const union nf_inet_addr *daddr,
                                   __be16 dport,
                                   const union nf_inet_addr *vaddr,
-                                  __be16 vport, __u16 protocol, __u32 fwmark)
+                                  __be16 vport, __u16 protocol, __u32 fwmark,
+                                  __u32 flags)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;
+       __be16 port = dport;
 
        svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
        if (!svc)
                return NULL;
-       dest = ip_vs_lookup_dest(svc, daddr, dport);
+       if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+               port = 0;
+       dest = ip_vs_lookup_dest(svc, daddr, port);
+       if (!dest)
+               dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
        if (dest)
                atomic_inc(&dest->refcnt);
        ip_vs_service_put(svc);
index 13d607ae9c526a2372fc7340a9b2c8e403fbd97b..1aa5cac748c4ad5ee32cc6c2acb44ce2e9f5e78a 100644 (file)
@@ -108,7 +108,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
                                  struct ip_vs_conn *ct)
 
 {
-       bool ret = 0;
+       bool ret = false;
 
        if (ct->af == p->af &&
            ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
@@ -121,7 +121,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
            ct->protocol == p->protocol &&
            ct->pe_data && ct->pe_data_len == p->pe_data_len &&
            !memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
-               ret = 1;
+               ret = true;
 
        IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n",
                      ip_vs_proto_name(p->protocol),
index 33815f4fb451c42aeb0dad99cf157c37cc32dfbf..069e8d4d5c015f19da7443df0ef5402d30db0a31 100644 (file)
  * server is dead or overloaded, the load balancer can bypass the cache
  * server and send requests to the original server directly.
  *
+ * The weight destination attribute can be used to control the
+ * distribution of connections to the destinations in servernode. The
+ * greater the weight, the more connections the destination
+ * will receive.
+ *
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -99,9 +104,11 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
        struct ip_vs_sh_bucket *b;
        struct list_head *p;
        struct ip_vs_dest *dest;
+       int d_count;
 
        b = tbl;
        p = &svc->destinations;
+       d_count = 0;
        for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
                if (list_empty(p)) {
                        b->dest = NULL;
@@ -113,7 +120,16 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
                        atomic_inc(&dest->refcnt);
                        b->dest = dest;
 
-                       p = p->next;
+                       IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
+                                     i, IP_VS_DBG_ADDR(svc->af, &dest->addr),
+                                     atomic_read(&dest->weight));
+
+                       /* Don't move to next dest until filling weight */
+                       if (++d_count >= atomic_read(&dest->weight)) {
+                               p = p->next;
+                               d_count = 0;
+                       }
+
                }
                b++;
        }
index 3cdd479f9b5d179292182bb2711ec243b57736ee..8a0d6d6889f0f7c419477d73598dee7f20750855 100644 (file)
@@ -603,9 +603,9 @@ sloop:
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6) {
                p += sizeof(struct ip_vs_sync_v6);
-               ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
-               ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
-               ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+               s->v6.caddr = cp->caddr.in6;
+               s->v6.vaddr = cp->vaddr.in6;
+               s->v6.daddr = cp->daddr.in6;
        } else
 #endif
        {
@@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                 * but still handled.
                 */
                dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
-                                      param->vport, protocol, fwmark);
+                                      param->vport, protocol, fwmark, flags);
 
                /*  Set the approprite ativity flag */
                if (protocol == IPPROTO_TCP) {
index aa2d7206ee8a064a5fbc57e88f655fb42e7fe21f..7fd66dec859d7ffc7a23db17e441ce5eda44592c 100644 (file)
@@ -207,7 +207,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
 
 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
 {
-       return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
+       return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
 }
 
 static struct dst_entry *
@@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
                        goto out_err;
                }
        }
-       ipv6_addr_copy(ret_saddr, &fl6.saddr);
+       *ret_saddr = fl6.saddr;
        return dst;
 
 out_err:
@@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
                                  atomic_read(&rt->dst.__refcnt));
                }
                if (ret_saddr)
-                       ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
+                       *ret_saddr = dest->dst_saddr.in6;
                spin_unlock(&dest->dst_lock);
        } else {
                dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -541,7 +541,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -658,7 +658,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        /* mangle the packet */
        if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
                goto tx_error;
-       ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
+       ipv6_hdr(skb)->daddr = cp->daddr.in6;
 
        if (!local || !skb->dev) {
                /* drop the old route when skb is not shared */
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
        iph->priority           =       old_iph->priority;
        memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
-       ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
-       ipv6_addr_copy(&iph->saddr, &saddr);
+       iph->daddr = cp->daddr.in6;
+       iph->saddr = saddr;
        iph->hop_limit          =       old_iph->hop_limit;
 
        /* Another hack: avoid icmp_send in ip_fragment */
@@ -1173,7 +1173,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -1293,7 +1293,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
         * Avoid duplicate tuple in reply direction for NAT traffic
         * to local address when connection is sync-ed
         */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (cp->flags & IP_VS_CONN_F_SYNC && local) {
                enum ip_conntrack_info ctinfo;
                struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
index 369df3f08d42635da9245558eca0cd950d1417f5..f4f8cda05986cc09d4283c9a753e7d29a5508e0b 100644 (file)
@@ -18,7 +18,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 
-static int nf_ct_acct __read_mostly;
+static bool nf_ct_acct __read_mostly;
 
 module_param_named(acct, nf_ct_acct, bool, 0644);
 MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -46,8 +46,8 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
                return 0;
 
        return seq_printf(s, "packets=%llu bytes=%llu ",
-                         (unsigned long long)acct[dir].packets,
-                         (unsigned long long)acct[dir].bytes);
+                         (unsigned long long)atomic64_read(&acct[dir].packets),
+                         (unsigned long long)atomic64_read(&acct[dir].bytes));
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
index 7202b0631cd6eb725debc17062a0d1f4a1087f41..e875f8902db3e7224b566e88e7bb82a73646a24f 100644 (file)
@@ -67,6 +67,7 @@ DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 unsigned int nf_conntrack_hash_rnd __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
@@ -1044,10 +1045,8 @@ acct:
 
                acct = nf_conn_acct_find(ct);
                if (acct) {
-                       spin_lock_bh(&ct->lock);
-                       acct[CTINFO2DIR(ctinfo)].packets++;
-                       acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
-                       spin_unlock_bh(&ct->lock);
+                       atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+                       atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
                }
        }
 }
@@ -1063,11 +1062,9 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 
                acct = nf_conn_acct_find(ct);
                if (acct) {
-                       spin_lock_bh(&ct->lock);
-                       acct[CTINFO2DIR(ctinfo)].packets++;
-                       acct[CTINFO2DIR(ctinfo)].bytes +=
-                               skb->len - skb_network_offset(skb);
-                       spin_unlock_bh(&ct->lock);
+                       atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+                       atomic64_add(skb->len - skb_network_offset(skb),
+                                    &acct[CTINFO2DIR(ctinfo)].bytes);
                }
        }
 
@@ -1087,7 +1084,7 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
 };
 #endif
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1342,8 +1339,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
                                        get_order(sz));
        if (!hash) {
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-               hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                                PAGE_KERNEL);
+               hash = vzalloc(sz);
        }
 
        if (hash && nulls)
index 6b368be937c615610a7fa7b90d26b58a2fa041d5..b62c4148b92131444f6e132cb55a058991d68379 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned long events;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -83,19 +78,20 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+                                  struct nf_ct_event_notifier *new)
 {
        int ret = 0;
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -105,32 +101,34 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
 
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+                                     struct nf_ct_event_notifier *new)
 {
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+                                  struct nf_exp_event_notifier *new)
 {
        int ret = 0;
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_expect_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -140,15 +138,16 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                     struct nf_exp_event_notifier *new)
 {
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
index 340c80d968d412ac77684a79b9bb98ceccb68b3f..4147ba3f653c085d678e5d7c67b3206553dace20 100644 (file)
@@ -38,8 +38,6 @@ unsigned int nf_ct_expect_max __read_mostly;
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
 
-static HLIST_HEAD(nf_ct_userspace_expect_list);
-
 /* nf_conntrack_expect helper functions */
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
@@ -47,14 +45,14 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
 
+       NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));
 
        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;
 
        hlist_del(&exp->lnode);
-       if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
-               master_help->expecting[exp->class]--;
+       master_help->expecting[exp->class]--;
 
        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);
@@ -314,37 +312,34 @@ void nf_ct_expect_put(struct nf_conntrack_expect *exp)
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
 
-static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
+static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 {
        struct nf_conn_help *master_help = nfct_help(exp->master);
+       struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
-       const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
 
        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);
 
-       if (master_help) {
-               hlist_add_head(&exp->lnode, &master_help->expectations);
-               master_help->expecting[exp->class]++;
-       } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
-               hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);
+       hlist_add_head(&exp->lnode, &master_help->expectations);
+       master_help->expecting[exp->class]++;
 
        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;
 
        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
-       if (master_help) {
-               p = &rcu_dereference_protected(
-                               master_help->helper,
-                               lockdep_is_held(&nf_conntrack_lock)
-                               )->expect_policy[exp->class];
-               exp->timeout.expires = jiffies + p->timeout * HZ;
+       helper = rcu_dereference_protected(master_help->helper,
+                                          lockdep_is_held(&nf_conntrack_lock));
+       if (helper) {
+               exp->timeout.expires = jiffies +
+                       helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);
 
        NF_CT_STAT_INC(net, expect_create);
+       return 0;
 }
 
 /* Race with expectations being used means we could have none to find; OK. */
@@ -389,14 +384,13 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
+       struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;
 
-       /* Don't allow expectations created from kernel-space with no helper */
-       if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
-           (!master_help || (master_help && !master_help->helper))) {
+       if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
@@ -414,11 +408,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
                }
        }
        /* Will be over limit? */
-       if (master_help) {
-               p = &rcu_dereference_protected(
-                       master_help->helper,
-                       lockdep_is_held(&nf_conntrack_lock)
-                       )->expect_policy[expect->class];
+       helper = rcu_dereference_protected(master_help->helper,
+                                          lockdep_is_held(&nf_conntrack_lock));
+       if (helper) {
+               p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
@@ -450,8 +443,9 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
        if (ret <= 0)
                goto out;
 
-       ret = 0;
-       nf_ct_expect_insert(expect);
+       ret = nf_ct_expect_insert(expect);
+       if (ret < 0)
+               goto out;
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
@@ -461,22 +455,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
 
-void nf_ct_remove_userspace_expectations(void)
-{
-       struct nf_conntrack_expect *exp;
-       struct hlist_node *n, *next;
-
-       hlist_for_each_entry_safe(exp, n, next,
-                                 &nf_ct_userspace_expect_list, lnode) {
-               if (del_timer(&exp->timeout)) {
-                       nf_ct_unlink_expect(exp);
-                       nf_ct_expect_put(exp);
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
-
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
@@ -604,25 +583,25 @@ static const struct file_operations exp_file_ops = {
        .llseek  = seq_lseek,
        .release = seq_release_net,
 };
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 
 static int exp_proc_init(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
 
        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
 }
 
 static void exp_proc_remove(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
        proc_net_remove(net, "nf_conntrack_expect");
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 }
 
 module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
index 6f5801eac99923eaaa66e3dd8572678df0ac8ceb..8c5c95c6d34f1be6912d6ffe5ad7066b23c24f5d 100644 (file)
@@ -42,7 +42,7 @@ static u_int16_t ports[MAX_PORTS];
 static unsigned int ports_c;
 module_param_array(ports, ushort, &ports_c, 0400);
 
-static int loose;
+static bool loose;
 module_param(loose, bool, 0600);
 
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
index f03c2d4539f6b8517530c899b2bc2419e4f80f88..722291f8af72f79379a10cad03cc8658e2959397 100644 (file)
@@ -42,7 +42,7 @@ static int gkrouted_only __read_mostly = 1;
 module_param(gkrouted_only, int, 0600);
 MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
 
-static int callforward_filter __read_mostly = 1;
+static bool callforward_filter __read_mostly = true;
 module_param(callforward_filter, bool, 0600);
 MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
                                     "if both endpoints are on different sides "
@@ -743,17 +743,16 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                }
                break;
        }
-#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
-    defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
        case AF_INET6: {
                struct flowi6 fl1, fl2;
                struct rt6_info *rt1, *rt2;
 
                memset(&fl1, 0, sizeof(fl1));
-               ipv6_addr_copy(&fl1.daddr, &src->in6);
+               fl1.daddr = src->in6;
 
                memset(&fl2, 0, sizeof(fl2));
-               ipv6_addr_copy(&fl2.daddr, &dst->in6);
+               fl2.daddr = dst->in6;
                if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
index 93c4bdbfc1ae52da32a1e466ca9ca881665041bb..c9e0de08aa872bcbd61a4bf0b8171669a632a914 100644 (file)
@@ -121,6 +121,18 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
        int ret = 0;
 
        if (tmpl != NULL) {
+               /* we've got a userspace helper. */
+               if (tmpl->status & IPS_USERSPACE_HELPER) {
+                       help = nf_ct_helper_ext_add(ct, flags);
+                       if (help == NULL) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       rcu_assign_pointer(help->helper, NULL);
+                       __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+                       ret = 0;
+                       goto out;
+               }
                help = nfct_help(tmpl);
                if (help != NULL)
                        helper = help->helper;
index e58aa9b1fe8a043226c4bc7ee90f5145a7adc316..e07dc3ae930ea3a2aa3947eb841e47a3cb23173f 100644 (file)
@@ -4,7 +4,7 @@
  * (C) 2001 by Jay Schulist <jschlst@samba.org>
  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -135,7 +135,7 @@ nla_put_failure:
 static inline int
 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       long timeout = (ct->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
        if (timeout < 0)
                timeout = 0;
@@ -203,25 +203,18 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
-                       enum ip_conntrack_dir dir)
+dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
+             enum ip_conntrack_dir dir)
 {
        enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
        struct nlattr *nest_count;
-       const struct nf_conn_counter *acct;
-
-       acct = nf_conn_acct_find(ct);
-       if (!acct)
-               return 0;
 
        nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
        if (!nest_count)
                goto nla_put_failure;
 
-       NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
-                    cpu_to_be64(acct[dir].packets));
-       NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
-                    cpu_to_be64(acct[dir].bytes));
+       NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
+       NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
 
        nla_nest_end(skb, nest_count);
 
@@ -231,6 +224,27 @@ nla_put_failure:
        return -1;
 }
 
+static int
+ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
+                       enum ip_conntrack_dir dir, int type)
+{
+       struct nf_conn_counter *acct;
+       u64 pkts, bytes;
+
+       acct = nf_conn_acct_find(ct);
+       if (!acct)
+               return 0;
+
+       if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
+               pkts = atomic64_xchg(&acct[dir].packets, 0);
+               bytes = atomic64_xchg(&acct[dir].bytes, 0);
+       } else {
+               pkts = atomic64_read(&acct[dir].packets);
+               bytes = atomic64_read(&acct[dir].bytes);
+       }
+       return dump_counters(skb, pkts, bytes, dir);
+}
+
 static int
 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
 {
@@ -393,15 +407,15 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
-                   int event, struct nf_conn *ct)
+ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+                   struct nf_conn *ct)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nlattr *nest_parms;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
 
-       event |= NFNL_SUBSYS_CTNETLINK << 8;
+       event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
@@ -430,8 +444,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 
        if (ctnetlink_dump_status(skb, ct) < 0 ||
            ctnetlink_dump_timeout(skb, ct) < 0 ||
-           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
+           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
            ctnetlink_dump_timestamp(skb, ct) < 0 ||
            ctnetlink_dump_protoinfo(skb, ct) < 0 ||
            ctnetlink_dump_helpinfo(skb, ct) < 0 ||
@@ -612,8 +626,10 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto nla_put_failure;
 
        if (events & (1 << IPCT_DESTROY)) {
-               if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+               if (ctnetlink_dump_counters(skb, ct,
+                                           IP_CT_DIR_ORIGINAL, type) < 0 ||
+                   ctnetlink_dump_counters(skb, ct,
+                                           IP_CT_DIR_REPLY, type) < 0 ||
                    ctnetlink_dump_timestamp(skb, ct) < 0)
                        goto nla_put_failure;
        } else {
@@ -709,20 +725,13 @@ restart:
                        }
                        if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                cb->nlh->nlmsg_seq,
-                                               IPCTNL_MSG_CT_NEW, ct) < 0) {
+                                               NFNL_MSG_TYPE(
+                                                       cb->nlh->nlmsg_type),
+                                               ct) < 0) {
                                nf_conntrack_get(&ct->ct_general);
                                cb->args[1] = (unsigned long)ct;
                                goto out;
                        }
-
-                       if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
-                                               IPCTNL_MSG_CT_GET_CTRZERO) {
-                               struct nf_conn_counter *acct;
-
-                               acct = nf_conn_acct_find(ct);
-                               if (acct)
-                                       memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
-                       }
                }
                if (cb->args[1]) {
                        cb->args[1] = 0;
@@ -1001,7 +1010,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        rcu_read_lock();
        err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
-                                 IPCTNL_MSG_CT_NEW, ct);
+                                 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
        rcu_read_unlock();
        nf_ct_put(ct);
        if (err <= 0)
@@ -1087,14 +1096,14 @@ ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 
        if (cda[CTA_NAT_DST]) {
                ret = ctnetlink_parse_nat_setup(ct,
-                                               IP_NAT_MANIP_DST,
+                                               NF_NAT_MANIP_DST,
                                                cda[CTA_NAT_DST]);
                if (ret < 0)
                        return ret;
        }
        if (cda[CTA_NAT_SRC]) {
                ret = ctnetlink_parse_nat_setup(ct,
-                                               IP_NAT_MANIP_SRC,
+                                               NF_NAT_MANIP_SRC,
                                                cda[CTA_NAT_SRC]);
                if (ret < 0)
                        return ret;
@@ -1358,12 +1367,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
                        rcu_read_unlock();
+                       spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
+                               spin_lock_bh(&nf_conntrack_lock);
                                err = -EOPNOTSUPP;
                                goto err1;
                        }
 
+                       spin_lock_bh(&nf_conntrack_lock);
                        rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname,
                                                            nf_ct_l3num(ct),
@@ -1638,7 +1650,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
 {
        struct nf_conn *master = exp->master;
-       long timeout = (exp->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
        struct nf_conn_help *help;
 
        if (timeout < 0)
@@ -1847,7 +1859,9 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (cda[CTA_EXPECT_MASTER])
+       if (cda[CTA_EXPECT_TUPLE])
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       else if (cda[CTA_EXPECT_MASTER])
                err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
        else
                return -EINVAL;
@@ -1869,25 +1883,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 
        err = -ENOMEM;
        skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (skb2 == NULL)
+       if (skb2 == NULL) {
+               nf_ct_expect_put(exp);
                goto out;
+       }
 
        rcu_read_lock();
        err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
                                      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
        rcu_read_unlock();
+       nf_ct_expect_put(exp);
        if (err <= 0)
                goto free;
 
-       nf_ct_expect_put(exp);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (err < 0)
+               goto out;
 
-       return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       return 0;
 
 free:
        kfree_skb(skb2);
 out:
-       nf_ct_expect_put(exp);
-       return err;
+       /* this avoids a loop in nfnetlink. */
+       return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 static int
@@ -2023,6 +2042,10 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        }
        help = nfct_help(ct);
        if (!help) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+       if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) {
                if (!cda[CTA_EXPECT_TIMEOUT]) {
                        err = -EINVAL;
                        goto out;
@@ -2163,6 +2186,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       int ret;
+
+       ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot register notifier.\n");
+               goto err_out;
+       }
+
+       ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot expect register notifier.\n");
+               goto err_unreg_notifier;
+       }
+#endif
+       return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+       return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+       struct net *net;
+
+       list_for_each_entry(net, net_exit_list, exit_list)
+               ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+       .init           = ctnetlink_net_init,
+       .exit_batch     = ctnetlink_net_exit_batch,
+};
+
 static int __init ctnetlink_init(void)
 {
        int ret;
@@ -2180,28 +2251,15 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       ret = nf_conntrack_register_notifier(&ctnl_notifier);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot register notifier.\n");
+       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+               pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
 
-       ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot expect register notifier.\n");
-               goto err_unreg_notifier;
-       }
-#endif
-
        return 0;
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
 err_unreg_exp_subsys:
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
 err_unreg_subsys:
        nfnetlink_subsys_unregister(&ctnl_subsys);
 err_out:
@@ -2212,12 +2270,7 @@ static void __exit ctnetlink_exit(void)
 {
        pr_info("ctnetlink: unregistering from nfnetlink.\n");
 
-       nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+       unregister_pernet_subsys(&ctnetlink_net_ops);
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
        nfnetlink_subsys_unregister(&ctnl_subsys);
 }
index 2e664a69d7dbc4907d7c589b2a5addc6b48ef739..d6dde6dc09e6679cf1206237d1456f1ecc2f21c0 100644 (file)
@@ -629,7 +629,7 @@ static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
        return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
                          struct nf_conn *ct)
 {
@@ -770,7 +770,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
        .error                  = dccp_error,
        .print_tuple            = dccp_print_tuple,
        .print_conntrack        = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = dccp_to_nlattr,
        .nlattr_size            = dccp_nlattr_size,
        .from_nlattr            = nlattr_to_dccp,
@@ -792,7 +792,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
        .error                  = dccp_error,
        .print_tuple            = dccp_print_tuple,
        .print_conntrack        = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = dccp_to_nlattr,
        .nlattr_size            = dccp_nlattr_size,
        .from_nlattr            = nlattr_to_dccp,
index d69facdd9a7ac527cb947dfd2112b416aa41a19d..f0338791b822d238d926486e78084afe5089b1a5 100644 (file)
@@ -291,7 +291,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
        .new             = gre_new,
        .destroy         = gre_destroy,
        .me              = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
index 6772b11546543a628706f765eb23b06ecf3e6b74..afa69136061ac18c7b9e1f1067f9679ddcbf0984 100644 (file)
@@ -461,7 +461,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -666,7 +666,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
        .packet                 = sctp_packet,
        .new                    = sctp_new,
        .me                     = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
        .nlattr_size            = sctp_nlattr_size,
        .from_nlattr            = nlattr_to_sctp,
@@ -696,7 +696,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
        .packet                 = sctp_packet,
        .new                    = sctp_new,
        .me                     = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
        .nlattr_size            = sctp_nlattr_size,
        .from_nlattr            = nlattr_to_sctp,
index 8235b86b4e87efdccf73d0f552f0a1a01a9f3763..97b9f3ebf28c2113be8ea5bfa015c75af0df9dbf 100644 (file)
@@ -1126,7 +1126,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1447,7 +1447,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
        .packet                 = tcp_packet,
        .new                    = tcp_new,
        .error                  = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = tcp_to_nlattr,
        .nlattr_size            = tcp_nlattr_size,
        .from_nlattr            = nlattr_to_tcp,
@@ -1479,7 +1479,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
        .packet                 = tcp_packet,
        .new                    = tcp_new,
        .error                  = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = tcp_to_nlattr,
        .nlattr_size            = tcp_nlattr_size,
        .from_nlattr            = nlattr_to_tcp,
index 8289088b8218fff057fdf2a2921ceb2bedbb31f2..5f35757fbff031ab224e758c0c3d8c502c86db55 100644 (file)
@@ -188,7 +188,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
        .packet                 = udp_packet,
        .new                    = udp_new,
        .error                  = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
@@ -216,7 +216,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
        .packet                 = udp_packet,
        .new                    = udp_new,
        .error                  = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
index 263b5a72588d9c5eaa2af6d2601bd97745f472b8..f52ca1181013e9881d6c2c34d6bccbd4e366d0e2 100644 (file)
@@ -174,7 +174,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
        .packet                 = udplite_packet,
        .new                    = udplite_new,
        .error                  = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
@@ -198,7 +198,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
        .packet                 = udplite_packet,
        .new                    = udplite_new,
        .error                  = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
index 05e9feb101c36845942ecc0b33291c55abf52dab..885f5ab9bc28efed6b3d7116ed87887bb9fd33b9 100644 (file)
@@ -34,7 +34,7 @@
 
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 int
 print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
             const struct nf_conntrack_l3proto *l3proto,
@@ -396,7 +396,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
 static void nf_conntrack_standalone_fini_proc(struct net *net)
 {
 }
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 
 /* Sysctl support */
 
index af7dd31af0a1e8c4761981cc03747a91feb99d22..e8d27afbbdb90f01b7a7f24a3cb7e5ece2bed7ec 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 
-static int nf_ct_tstamp __read_mostly;
+static bool nf_ct_tstamp __read_mostly;
 
 module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
 MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
new file mode 100644 (file)
index 0000000..11ba013
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <linux/atomic.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
+
+static LIST_HEAD(nfnl_acct_list);
+
+struct nf_acct {
+       atomic64_t              pkts;
+       atomic64_t              bytes;
+       struct list_head        head;
+       atomic_t                refcnt;
+       char                    name[NFACCT_NAME_MAX];
+       struct rcu_head         rcu_head;
+};
+
+static int
+nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+       struct nf_acct *nfacct, *matching = NULL;
+       char *acct_name;
+
+       if (!tb[NFACCT_NAME])
+               return -EINVAL;
+
+       acct_name = nla_data(tb[NFACCT_NAME]);
+
+       list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+               if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
+                       continue;
+
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+
+               matching = nfacct;
+               break;
+       }
+
+       if (matching) {
+               if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+                       /* reset counters if you request a replacement. */
+                       atomic64_set(&matching->pkts, 0);
+                       atomic64_set(&matching->bytes, 0);
+                       return 0;
+               }
+               return -EBUSY;
+       }
+
+       nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL);
+       if (nfacct == NULL)
+               return -ENOMEM;
+
+       strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+
+       if (tb[NFACCT_BYTES]) {
+               atomic64_set(&nfacct->bytes,
+                            be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+       }
+       if (tb[NFACCT_PKTS]) {
+               atomic64_set(&nfacct->pkts,
+                            be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+       }
+       atomic_set(&nfacct->refcnt, 1);
+       list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+       return 0;
+}
+
+static int
+nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+                  int event, struct nf_acct *acct)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       u64 pkts, bytes;
+
+       event |= NFNL_SUBSYS_ACCT << 8;
+       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               goto nlmsg_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = AF_UNSPEC;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+
+       if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
+               pkts = atomic64_xchg(&acct->pkts, 0);
+               bytes = atomic64_xchg(&acct->bytes, 0);
+       } else {
+               pkts = atomic64_read(&acct->pkts);
+               bytes = atomic64_read(&acct->bytes);
+       }
+       NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
+       NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
+       NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+
+       nlmsg_end(skb, nlh);
+       return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -1;
+}
+
+static int
+nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct nf_acct *cur, *last;
+
+       if (cb->args[2])
+               return 0;
+
+       last = (struct nf_acct *)cb->args[1];
+       if (cb->args[1])
+               cb->args[1] = 0;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+               if (last && cur != last)
+                       continue;
+
+               if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                                      cb->nlh->nlmsg_seq,
+                                      NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+                                      NFNL_MSG_ACCT_NEW, cur) < 0) {
+                       cb->args[1] = (unsigned long)cur;
+                       break;
+               }
+       }
+       if (!cb->args[1])
+               cb->args[2] = 1;
+       rcu_read_unlock();
+       return skb->len;
+}
+
+static int
+nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+       int ret = -ENOENT;
+       struct nf_acct *cur;
+       char *acct_name;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump,
+                                         NULL, 0);
+       }
+
+       if (!tb[NFACCT_NAME])
+               return -EINVAL;
+       acct_name = nla_data(tb[NFACCT_NAME]);
+
+       list_for_each_entry(cur, &nfnl_acct_list, head) {
+               struct sk_buff *skb2;
+
+               if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+                       continue;
+
+               skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               if (skb2 == NULL) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid,
+                                        nlh->nlmsg_seq,
+                                        NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                        NFNL_MSG_ACCT_NEW, cur);
+               if (ret <= 0) {
+                       kfree_skb(skb2);
+                       break;
+               }
+               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+                                       MSG_DONTWAIT);
+               if (ret > 0)
+                       ret = 0;
+
+               /* this avoids a loop in nfnetlink. */
+               return ret == -EAGAIN ? -ENOBUFS : ret;
+       }
+       return ret;
+}
+
+/* try to delete object, fail if it is still in use. */
+static int nfnl_acct_try_del(struct nf_acct *cur)
+{
+       int ret = 0;
+
+       /* we want to avoid races with nfnl_acct_find_get. */
+       if (atomic_dec_and_test(&cur->refcnt)) {
+               /* We are protected by nfnl mutex. */
+               list_del_rcu(&cur->head);
+               kfree_rcu(cur, rcu_head);
+       } else {
+               /* still in use, restore reference counter. */
+               atomic_inc(&cur->refcnt);
+               ret = -EBUSY;
+       }
+       return ret;
+}
+
+/* NFNL_MSG_ACCT_DEL handler: delete the object named by NFACCT_NAME, or
+ * flush the whole list when no name attribute is present.  Returns
+ * -ENOENT if the named object does not exist, -EBUSY if it is in use. */
+static int
+nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+       char *acct_name;
+       struct nf_acct *cur;
+       int ret = -ENOENT;
+
+       if (!tb[NFACCT_NAME]) {
+               /* Flush is best-effort: nfnl_acct_try_del()'s -EBUSY is
+                * deliberately ignored, so in-use entries are skipped. */
+               list_for_each_entry(cur, &nfnl_acct_list, head)
+                       nfnl_acct_try_del(cur);
+
+               return 0;
+       }
+       acct_name = nla_data(tb[NFACCT_NAME]);
+
+       list_for_each_entry(cur, &nfnl_acct_list, head) {
+               if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+                       continue;
+
+               ret = nfnl_acct_try_del(cur);
+               if (ret < 0)
+                       return ret;
+
+               break;
+       }
+       return ret;
+}
+
+/* Netlink attribute policy: NAME is a NUL-terminated string of at most
+ * NFACCT_NAME_MAX bytes including the terminator; BYTES/PKTS are u64. */
+static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
+       [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
+       [NFACCT_BYTES] = { .type = NLA_U64 },
+       [NFACCT_PKTS] = { .type = NLA_U64 },
+};
+
+/* Message dispatch table.  GET and GET_CTRZERO share one handler; the
+ * handler presumably tells them apart via NFNL_MSG_TYPE(nlh->nlmsg_type)
+ * (it passes that value to nfnl_acct_fill_info) -- see nfnl_acct_get. */
+static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
+       [NFNL_MSG_ACCT_NEW]             = { .call = nfnl_acct_new,
+                                           .attr_count = NFACCT_MAX,
+                                           .policy = nfnl_acct_policy },
+       [NFNL_MSG_ACCT_GET]             = { .call = nfnl_acct_get,
+                                           .attr_count = NFACCT_MAX,
+                                           .policy = nfnl_acct_policy },
+       [NFNL_MSG_ACCT_GET_CTRZERO]     = { .call = nfnl_acct_get,
+                                           .attr_count = NFACCT_MAX,
+                                           .policy = nfnl_acct_policy },
+       [NFNL_MSG_ACCT_DEL]             = { .call = nfnl_acct_del,
+                                           .attr_count = NFACCT_MAX,
+                                           .policy = nfnl_acct_policy },
+};
+
+/* nfnetlink subsystem descriptor for the "acct" subsystem. */
+static const struct nfnetlink_subsystem nfnl_acct_subsys = {
+       .name                           = "acct",
+       .subsys_id                      = NFNL_SUBSYS_ACCT,
+       .cb_count                       = NFNL_MSG_ACCT_MAX,
+       .cb                             = nfnl_acct_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
+
+/* Look up an accounting object by name and take a reference on it and on
+ * this module.  Returns NULL when no such object exists or when it is
+ * concurrently being released.  Callers drop the reference with
+ * nfnl_acct_put(). */
+struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+{
+       struct nf_acct *cur, *acct = NULL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+               /* checkpatch: spaces required around '!=' (was ")!= 0"). */
+               if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+                       continue;
+
+               if (!try_module_get(THIS_MODULE))
+                       goto err;
+
+               /* refcnt drops to zero transiently in nfnl_acct_try_del();
+                * inc_not_zero avoids resurrecting a dying object. */
+               if (!atomic_inc_not_zero(&cur->refcnt)) {
+                       module_put(THIS_MODULE);
+                       goto err;
+               }
+
+               acct = cur;
+               break;
+       }
+err:
+       rcu_read_unlock();
+       return acct;
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
+
+/* Drop a reference obtained through nfnl_acct_find_get().  This never
+ * frees the object itself; destruction happens only via the netlink DEL
+ * path (nfnl_acct_try_del) or module exit. */
+void nfnl_acct_put(struct nf_acct *acct)
+{
+       atomic_dec(&acct->refcnt);
+       module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_put);
+
+/* Packet fast path: add one packet and skb->len bytes to the object's
+ * counters.  Lockless -- the counters are 64-bit atomics. */
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
+{
+       atomic64_inc(&nfacct->pkts);
+       atomic64_add(skb->len, &nfacct->bytes);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_update);
+
+/* Module init: register the "acct" subsystem with nfnetlink. */
+static int __init nfnl_acct_init(void)
+{
+       int ret;
+
+       pr_info("nfnl_acct: registering with nfnetlink.\n");
+       ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
+       if (ret < 0) {
+               pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
+               return ret;
+       }
+       return 0;
+}
+
+/* Module unload: unregister from nfnetlink and release every remaining
+ * accounting object. */
+static void __exit nfnl_acct_exit(void)
+{
+       struct nf_acct *cur, *tmp;
+
+       pr_info("nfnl_acct: unregistering from nfnetlink.\n");
+       nfnetlink_subsys_unregister(&nfnl_acct_subsys);
+
+       /* _safe variant: entries are unlinked while iterating. */
+       list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
+               list_del_rcu(&cur->head);
+               /* We are sure that our objects have no clients at this point,
+                * it's safe to release them all without checking refcnt. */
+               kfree_rcu(cur, rcu_head);
+       }
+}
+
+module_init(nfnl_acct_init);
+module_exit(nfnl_acct_exit);
index 4bca15a0c38547ed7701684034733fe009163df0..ba92824086f3c3843c7158c5a5de86fe0db53dc4 100644 (file)
@@ -98,6 +98,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
        struct ipv6hdr _ip6h;
        const struct ipv6hdr *ih;
        u8 nexthdr;
+       __be16 frag_off;
        int offset;
 
        ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
@@ -108,7 +109,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
 
        nexthdr = ih->nexthdr;
        offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
-                                 &nexthdr);
+                                 &nexthdr, &frag_off);
 
        audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
                         &ih->saddr, &ih->daddr, nexthdr);
index 0221d10de75a517dbc4c5e5c7d40b432abef15a3..8e87123f1373a1a4a42ddb6211c1737d2346047d 100644 (file)
@@ -62,8 +62,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
        int ret = 0;
        u8 proto;
 
-       if (info->flags & ~XT_CT_NOTRACK)
-               return -EINVAL;
+       if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
+               return -EOPNOTSUPP;
 
        if (info->flags & XT_CT_NOTRACK) {
                ct = nf_ct_untracked_get();
@@ -92,7 +92,9 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                                  GFP_KERNEL))
                goto err3;
 
-       if (info->helper[0]) {
+       if (info->flags & XT_CT_USERSPACE_HELPER) {
+               __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+       } else if (info->helper[0]) {
                ret = -ENOENT;
                proto = xt_ct_find_proto(par);
                if (!proto) {
index d4f4b5d66b2075cb816421387a2e0f6f78bc01f8..95237c89607a7ee330273abf693c77af607d108a 100644 (file)
@@ -49,7 +49,7 @@ static u32 hash_v4(const struct sk_buff *skb)
        return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 hash_v6(const struct sk_buff *skb)
 {
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -74,7 +74,7 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
                if (par->family == NFPROTO_IPV4)
                        queue = (((u64) hash_v4(skb) * info->queues_total) >>
                                 32) + queue;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
                else if (par->family == NFPROTO_IPV6)
                        queue = (((u64) hash_v6(skb) * info->queues_total) >>
                                 32) + queue;
index 9e63b43faeed030b5b51118f54722bc68cd2997d..190ad37c5cf8ef7cc6acaca03e3494fad5a8a724 100644 (file)
@@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
                struct flowi6 *fl6 = &fl.u.ip6;
 
                memset(fl6, 0, sizeof(*fl6));
-               ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+               fl6->daddr = ipv6_hdr(skb)->saddr;
        }
        rcu_read_lock();
        ai = nf_get_afinfo(family);
@@ -198,17 +198,18 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static unsigned int
 tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
+       __be16 frag_off;
        int tcphoff;
        int ret;
 
        nexthdr = ipv6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par->targinfo,
@@ -259,7 +260,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
        return -EINVAL;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
 {
        const struct xt_tcpmss_info *info = par->targinfo;
@@ -292,7 +293,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = {
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .family         = NFPROTO_IPV6,
                .name           = "TCPMSS",
index 9dc9ecfdd546298e4bc6dca11a20844367562416..25fd1c4e1eec3229e8629420e6750ebe74bb6bea 100644 (file)
@@ -80,16 +80,17 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
               sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
 
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
 static unsigned int
 tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int tcphoff;
        u_int8_t nexthdr;
+       __be16 frag_off;
 
        nexthdr = ipv6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
 
@@ -108,7 +109,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
                .targetsize = sizeof(struct xt_tcpoptstrip_target_info),
                .me         = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
        {
                .name       = "TCPOPTSTRIP",
                .family     = NFPROTO_IPV6,
index 5f054a0dbbb1909777c5bd0858e87437aa94c587..3aae66facf9f79d8128857a24461127b0e6389b3 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #      define WITH_CONNTRACK 1
 #      include <net/netfilter/nf_conntrack.h>
 #endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#      define WITH_IPV6 1
-#endif
 
 struct xt_tee_priv {
        struct notifier_block   notifier;
@@ -136,7 +133,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 static bool
 tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 {
@@ -196,7 +193,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        }
        return XT_CONTINUE;
 }
-#endif /* WITH_IPV6 */
+#endif
 
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
@@ -276,7 +273,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
                .destroy    = tee_tg_destroy,
                .me         = THIS_MODULE,
        },
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
        {
                .name       = "TEE",
                .revision   = 1,
index dcfd57eb9d0249cea62289e33e64e76696b2dc00..35a959a096e0a49e8c9d20931f95047b12895576 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
index b77d383cec7818387d3703ba01b79b0c74bf5f52..49c5ff7f6dd67fdbec03b360323aa102ff59e37a 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/ip.h>
 #include <net/route.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip6_fib.h>
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Xtables: address type match");
 MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
                            const struct in6_addr *addr)
 {
@@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
        int route_err;
 
        memset(&flow, 0, sizeof(flow));
-       ipv6_addr_copy(&flow.daddr, addr);
+       flow.daddr = *addr;
        if (dev)
                flow.flowi6_oif = dev->ifindex;
 
@@ -149,7 +149,7 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
        else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
                dev = par->out;
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        if (par->family == NFPROTO_IPV6)
                return addrtype_mt6(net, dev, skb, info);
 #endif
@@ -190,7 +190,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
                return -EINVAL;
        }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        if (par->family == NFPROTO_IPV6) {
                if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
                        pr_err("ipv6 BLACKHOLE matching not supported\n");
index 5b138506690ec578105911ffb64309b5297d6458..e595e07a759b2df8ccdce5069d1974ec6ccaa754 100644 (file)
@@ -40,46 +40,46 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
        case XT_CONNBYTES_PKTS:
                switch (sinfo->direction) {
                case XT_CONNBYTES_DIR_ORIGINAL:
-                       what = counters[IP_CT_DIR_ORIGINAL].packets;
+                       what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
                        break;
                case XT_CONNBYTES_DIR_REPLY:
-                       what = counters[IP_CT_DIR_REPLY].packets;
+                       what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
                        break;
                case XT_CONNBYTES_DIR_BOTH:
-                       what = counters[IP_CT_DIR_ORIGINAL].packets;
-                       what += counters[IP_CT_DIR_REPLY].packets;
+                       what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
+                       what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
                        break;
                }
                break;
        case XT_CONNBYTES_BYTES:
                switch (sinfo->direction) {
                case XT_CONNBYTES_DIR_ORIGINAL:
-                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
+                       what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
                        break;
                case XT_CONNBYTES_DIR_REPLY:
-                       what = counters[IP_CT_DIR_REPLY].bytes;
+                       what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
                        break;
                case XT_CONNBYTES_DIR_BOTH:
-                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
-                       what += counters[IP_CT_DIR_REPLY].bytes;
+                       what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+                       what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
                        break;
                }
                break;
        case XT_CONNBYTES_AVGPKT:
                switch (sinfo->direction) {
                case XT_CONNBYTES_DIR_ORIGINAL:
-                       bytes = counters[IP_CT_DIR_ORIGINAL].bytes;
-                       pkts  = counters[IP_CT_DIR_ORIGINAL].packets;
+                       bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+                       pkts  = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
                        break;
                case XT_CONNBYTES_DIR_REPLY:
-                       bytes = counters[IP_CT_DIR_REPLY].bytes;
-                       pkts  = counters[IP_CT_DIR_REPLY].packets;
+                       bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+                       pkts  = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
                        break;
                case XT_CONNBYTES_DIR_BOTH:
-                       bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
-                               counters[IP_CT_DIR_REPLY].bytes;
-                       pkts  = counters[IP_CT_DIR_ORIGINAL].packets +
-                               counters[IP_CT_DIR_REPLY].packets;
+                       bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) +
+                               atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+                       pkts  = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) +
+                               atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
                        break;
                }
                if (pkts != 0)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c
new file mode 100644 (file)
index 0000000..3c831a8
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits
+ *
+ * (C) 2002 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_ecn.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ecn");
+MODULE_ALIAS("ip6t_ecn");
+
+/* Check the requested TCP ECE/CWR flag conditions from @einfo against the
+ * TCP header of @skb.  Returns false when any requested (possibly
+ * inverted) flag test fails or the TCP header cannot be read. */
+static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_ecn_info *einfo = par->matchinfo;
+       struct tcphdr _tcph;
+       const struct tcphdr *th;
+
+       /* In practice, TCP match does this, so can't fail.  But let's
+        * be good citizens.
+        */
+       th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
+       if (th == NULL)
+               return false;
+
+       /* A tested flag matches when its value equals the expectation:
+        * 1 normally, 0 when the corresponding invert bit is set. */
+       if (einfo->operation & XT_ECN_OP_MATCH_ECE &&
+           th->ece != !(einfo->invert & XT_ECN_OP_MATCH_ECE))
+               return false;
+
+       if (einfo->operation & XT_ECN_OP_MATCH_CWR &&
+           th->cwr != !(einfo->invert & XT_ECN_OP_MATCH_CWR))
+               return false;
+
+       return true;
+}
+
+/* Compare the IPv4 ECN bits (TOS field masked by XT_ECN_IP_MASK) against
+ * the configured value, honouring the XT_ECN_OP_MATCH_IP invert flag. */
+static inline bool match_ip(const struct sk_buff *skb,
+                           const struct xt_ecn_info *einfo)
+{
+       return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^
+              !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+/* IPv4 entry point: apply the requested IP-level and TCP-level ECN tests. */
+static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_ecn_info *info = par->matchinfo;
+
+       if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))
+               return false;
+
+       if (!(info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR)))
+               return true;
+
+       return match_tcp(skb, par);
+}
+
+/* Validate an IPv4 "ecn" rule: reject unknown operation/invert bits and
+ * TCP-flag tests in rules that are not restricted to TCP. */
+static int ecn_mt_check4(const struct xt_mtchk_param *par)
+{
+       const struct xt_ecn_info *info = par->matchinfo;
+       const struct ipt_ip *ip = par->entryinfo;
+
+       /* Any bit outside the known operation set is invalid, whether in
+        * the operation mask or the invert mask. */
+       if ((info->operation | info->invert) & XT_ECN_OP_MATCH_MASK)
+               return -EINVAL;
+
+       if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+           (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
+               pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Compare the IPv6 ECN bits (taken from the traffic-class bits in
+ * flow_lbl[0]) against the configured value, honouring the
+ * XT_ECN_OP_MATCH_IP invert flag. */
+static inline bool match_ipv6(const struct sk_buff *skb,
+                             const struct xt_ecn_info *einfo)
+{
+       return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) ==
+               einfo->ip_ect) ^
+              !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+/* IPv6 entry point: apply the requested IP-level and TCP-level ECN tests. */
+static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_ecn_info *info = par->matchinfo;
+
+       if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))
+               return false;
+
+       if (!(info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR)))
+               return true;
+
+       return match_tcp(skb, par);
+}
+
+/* Validate an IPv6 "ecn" rule: reject unknown operation/invert bits and
+ * TCP-flag tests in rules that are not restricted to TCP. */
+static int ecn_mt_check6(const struct xt_mtchk_param *par)
+{
+       const struct xt_ecn_info *info = par->matchinfo;
+       const struct ip6t_ip6 *ip = par->entryinfo;
+
+       /* Any bit outside the known operation set is invalid, whether in
+        * the operation mask or the invert mask. */
+       if ((info->operation | info->invert) & XT_ECN_OP_MATCH_MASK)
+               return -EINVAL;
+
+       if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+           (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
+               pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* One "ecn" match registration per address family; same match data size,
+ * family-specific match and checkentry callbacks. */
+static struct xt_match ecn_mt_reg[] __read_mostly = {
+       {
+               .name           = "ecn",
+               .family         = NFPROTO_IPV4,
+               .match          = ecn_mt4,
+               .matchsize      = sizeof(struct xt_ecn_info),
+               .checkentry     = ecn_mt_check4,
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "ecn",
+               .family         = NFPROTO_IPV6,
+               .match          = ecn_mt6,
+               .matchsize      = sizeof(struct xt_ecn_info),
+               .checkentry     = ecn_mt_check6,
+               .me             = THIS_MODULE,
+       },
+};
+
+/* Register the IPv4 and IPv6 "ecn" matches in one call. */
+static int __init ecn_mt_init(void)
+{
+       return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+/* Unregister both matches on module unload. */
+static void __exit ecn_mt_exit(void)
+{
+       xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+module_init(ecn_mt_init);
+module_exit(ecn_mt_exit);
index dfd52bad1523f73535110e0d1b2c9f4a87c6545a..8e4992101875086cd412154d102793c99ffa6bed 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/mm.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <linux/ipv6.h>
 #include <net/ipv6.h>
 #endif
@@ -64,7 +64,7 @@ struct dsthash_dst {
                        __be32 src;
                        __be32 dst;
                } ip;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
                struct {
                        __be32 src[4];
                        __be32 dst[4];
@@ -413,7 +413,7 @@ static inline __be32 maskl(__be32 a, unsigned int l)
        return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
 {
        switch (p) {
@@ -445,6 +445,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 {
        __be16 _ports[2], *ports;
        u8 nexthdr;
+       __be16 frag_off;
        int poff;
 
        memset(dst, 0, sizeof(*dst));
@@ -463,7 +464,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                        return 0;
                nexthdr = ip_hdr(skb)->protocol;
                break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
                        memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
@@ -480,7 +481,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ipv6_hdr(skb)->nexthdr;
-               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
                if ((int)protoff < 0)
                        return -1;
                break;
@@ -615,7 +616,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .name           = "hashlimit",
                .revision       = 1,
@@ -692,7 +693,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                                 ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                 ent->rateinfo.cost);
                break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
                res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
                                 (long)(ent->expires - jiffies)/HZ,
@@ -760,7 +761,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
        hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
        if (!hashlimit_net->ipt_hashlimit)
                return -ENOMEM;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
        if (!hashlimit_net->ip6t_hashlimit) {
                proc_net_remove(net, "ipt_hashlimit");
@@ -773,7 +774,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
        proc_net_remove(net, "ipt_hashlimit");
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        proc_net_remove(net, "ip6t_hashlimit");
 #endif
 }
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
new file mode 100644 (file)
index 0000000..b3be0ef
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (or any
+ * later at your option) as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+#include <linux/netfilter/xt_nfacct.h>
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_nfacct");
+MODULE_ALIAS("ip6t_nfacct");
+
+/* Per-packet hook: account the packet against the attached nfacct object.
+ * Always returns true so the rule continues to match. */
+static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       /* This is a match, not a target: use matchinfo.  The original
+        * par->targinfo only happened to work because matchinfo and
+        * targinfo share a union in struct xt_action_param. */
+       const struct xt_nfacct_match_info *info = par->matchinfo;
+
+       nfnl_acct_update(skb, info->nfacct);
+
+       return true;
+}
+
+/* Rule setup: bind the rule to the named nfacct object, taking a
+ * reference that nfacct_mt_destroy() drops.  Returns -ENOENT when no
+ * object with that name has been created via nfnetlink_acct. */
+static int
+nfacct_mt_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_nfacct_match_info *info = par->matchinfo;
+       struct nf_acct *nfacct;
+
+       nfacct = nfnl_acct_find_get(info->name);
+       if (nfacct == NULL) {
+               /* grammar fix in user-visible log: "exists" -> "exist" */
+               pr_info("xt_nfacct: accounting object with name `%s' "
+                       "does not exist\n", info->name);
+               return -ENOENT;
+       }
+       info->nfacct = nfacct;
+       return 0;
+}
+
+/* Rule teardown: release the reference taken in nfacct_mt_checkentry(). */
+static void
+nfacct_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       const struct xt_nfacct_match_info *info = par->matchinfo;
+
+       nfnl_acct_put(info->nfacct);
+}
+
+/* Family-independent (NFPROTO_UNSPEC) "nfacct" match registration. */
+static struct xt_match nfacct_mt_reg __read_mostly = {
+       .name       = "nfacct",
+       .family     = NFPROTO_UNSPEC,
+       .checkentry = nfacct_mt_checkentry,
+       .match      = nfacct_mt,
+       .destroy    = nfacct_mt_destroy,
+       .matchsize  = sizeof(struct xt_nfacct_match_info),
+       .me         = THIS_MODULE,
+};
+
+/* Register the "nfacct" match with x_tables. */
+static int __init nfacct_mt_init(void)
+{
+       return xt_register_match(&nfacct_mt_reg);
+}
+
+/* Unregister the match on module unload. */
+static void __exit nfacct_mt_exit(void)
+{
+       xt_unregister_match(&nfacct_mt_reg);
+}
+
+module_init(nfacct_mt_init);
+module_exit(nfacct_mt_exit);
index fe39f7e913dff490e948ca47b1ce6c14844013d6..72bb07f57f97c1a7bbc493b187aa0c9f5719ea61 100644 (file)
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_SOCKET_HAVE_IPV6 1
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -30,7 +30,7 @@
 
 #include <linux/netfilter/xt_socket.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #define XT_SOCKET_HAVE_CONNTRACK 1
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -214,6 +214,7 @@ extract_icmp6_fields(const struct sk_buff *skb,
        struct icmp6hdr *icmph, _icmph;
        __be16 *ports, _ports[2];
        u8 inside_nexthdr;
+       __be16 inside_fragoff;
        int inside_hdrlen;
 
        icmph = skb_header_pointer(skb, outside_hdrlen,
@@ -229,7 +230,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
                return 1;
        inside_nexthdr = inside_iph->nexthdr;
 
-       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), &inside_nexthdr);
+       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+                                        &inside_nexthdr, &inside_fragoff);
        if (inside_hdrlen < 0)
                return 1; /* hjm: Packet has no/incomplete transport layer headers. */
 
index 96b749dacc344def55e226188a5095b74a3f1b40..6f1701322fb695207684abc400452bb8e749c57a 100644 (file)
@@ -96,7 +96,7 @@ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
 }
 
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_search - Search for a matching IPv6 address entry
  * @addr: IPv6 address
@@ -185,7 +185,7 @@ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_add - Add a new IPv6 address entry to a list
  * @entry: address entry
@@ -263,7 +263,7 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
        return entry;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_remove_entry - Remove an IPv6 address entry
  * @entry: address entry
@@ -342,7 +342,7 @@ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
        }
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_audit_addr - Audit an IPv6 address
  * @audit_buf: audit buffer
index fdbc1d2c7352be7f993f814ef205cdbd9c5dec2e..a1287ce181300cef285ec6748d641c946453c1a2 100644 (file)
@@ -133,7 +133,7 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list)
 
index 3f905e5370c28eed56d3d1404917114b973c1b0b..38204112b9f4a0faec6e629e3431359c17033b38 100644 (file)
@@ -78,7 +78,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
        struct netlbl_dom_map *ptr;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -90,7 +90,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
                        netlbl_af4list_remove_entry(iter4);
                        kfree(netlbl_domhsh_addr4_entry(iter4));
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &ptr->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -217,7 +217,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
                        cipsov4 = map4->type_def.cipsov4;
                        netlbl_af4list_audit_addr(audit_buf, 0, NULL,
                                                  addr4->addr, addr4->mask);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                } else if (addr6 != NULL) {
                        struct netlbl_domaddr6_map *map6;
                        map6 = netlbl_domhsh_addr6_entry(addr6);
@@ -306,7 +306,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
        struct netlbl_dom_map *entry_old;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -338,7 +338,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                               &entry->type_def.addrsel->list4)
                                netlbl_domhsh_audit_add(entry, iter4, NULL,
                                                        ret_val, audit_info);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(iter6,
                                               &entry->type_def.addrsel->list6)
                                netlbl_domhsh_audit_add(entry, NULL, iter6,
@@ -365,7 +365,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                ret_val = -EEXIST;
                                goto add_return;
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6)
                        if (netlbl_af6list_search_exact(&iter6->addr,
@@ -386,7 +386,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                        if (ret_val != 0)
                                goto add_return;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &entry->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -510,7 +510,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
        struct netlbl_dom_map *entry_map;
        struct netlbl_af4list *entry_addr;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
        struct netlbl_domaddr4_map *entry;
@@ -533,7 +533,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
                goto remove_af4_failure;
        netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
                goto remove_af4_single_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
                goto remove_af4_single_addr;
 #endif /* IPv6 */
@@ -644,7 +644,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
        return netlbl_domhsh_addr4_entry(addr_iter);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
  * @domain: the domain name to search for
index bfcc0f7024c5d69547ade4bb7cb76f58f9e1564d..90872c4ca30f4ba9aad89089eff5dadfdff42717 100644 (file)
@@ -104,7 +104,7 @@ int netlbl_domhsh_walk(u32 *skip_bkt,
                     int (*callback) (struct netlbl_dom_map *entry, void *arg),
                     void *cb_arg);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
                                                  const struct in6_addr *addr);
 #endif /* IPv6 */
index 9c24de10a6579b78e452e47912c56e5d894e58ea..2560e7b441c60a2b7a8c76aa6de3c4d62bd92035 100644 (file)
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
        struct netlbl_domaddr_map *addrmap = NULL;
        struct netlbl_domaddr4_map *map4 = NULL;
        struct netlbl_domaddr6_map *map6 = NULL;
-       const struct in_addr *addr4, *mask4;
-       const struct in6_addr *addr6, *mask6;
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                INIT_LIST_HEAD(&addrmap->list6);
 
                switch (family) {
-               case AF_INET:
-                       addr4 = addr;
-                       mask4 = mask;
+               case AF_INET: {
+                       const struct in_addr *addr4 = addr;
+                       const struct in_addr *mask4 = mask;
                        map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
                        if (map4 == NULL)
                                goto cfg_unlbl_map_add_failure;
@@ -148,25 +146,29 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
-               case AF_INET6:
-                       addr6 = addr;
-                       mask6 = mask;
+                       }
+#if IS_ENABLED(CONFIG_IPV6)
+               case AF_INET6: {
+                       const struct in6_addr *addr6 = addr;
+                       const struct in6_addr *mask6 = mask;
                        map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
                        if (map6 == NULL)
                                goto cfg_unlbl_map_add_failure;
                        map6->type = NETLBL_NLTYPE_UNLABELED;
-                       ipv6_addr_copy(&map6->list.addr, addr6);
+                       map6->list.addr = *addr6;
                        map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
                        map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
                        map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
                        map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
-                       ipv6_addr_copy(&map6->list.mask, mask6);
+                       map6->list.mask = *mask6;
                        map6->list.valid = 1;
-                       ret_val = netlbl_af4list_add(&map4->list,
-                                                    &addrmap->list4);
+                       ret_val = netlbl_af6list_add(&map6->list,
+                                                    &addrmap->list6);
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
+                       }
+#endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
                        break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
@@ -667,7 +673,7 @@ int netlbl_sock_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -718,7 +724,7 @@ int netlbl_sock_getattr(struct sock *sk,
        case AF_INET:
                ret_val = cipso_v4_sock_getattr(sk, secattr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                ret_val = -ENOMSG;
                break;
@@ -776,7 +782,7 @@ int netlbl_conn_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -847,7 +853,7 @@ int netlbl_req_setattr(struct request_sock *req,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -920,7 +926,7 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -959,7 +965,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
                    cipso_v4_skbuff_getattr(skb, secattr) == 0)
                        return 0;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                break;
 #endif /* IPv6 */
index bfa55586977572e668d2a53004513358ce9efcf0..4809e2e48b02542931d436188680f9663a6dd699 100644 (file)
@@ -184,7 +184,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
 
                entry->type = NETLBL_NLTYPE_ADDRSELECT;
                entry->type_def.addrsel = addrmap;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
                struct in6_addr *addr;
                struct in6_addr *mask;
@@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                        ret_val = -ENOMEM;
                        goto add_failure;
                }
-               ipv6_addr_copy(&map->list.addr, addr);
+               map->list.addr = *addr;
                map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
                map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
                map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
                map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-               ipv6_addr_copy(&map->list.mask, mask);
+               map->list.mask = *mask;
                map->list.valid = 1;
                map->type = entry->type;
 
@@ -270,7 +270,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
        struct nlattr *nla_a;
        struct nlattr *nla_b;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif
 
@@ -324,7 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
 
                        nla_nest_end(skb, nla_b);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6) {
                        struct netlbl_domaddr6_map *map6;
index e251c2c88521dac541700d7ce1b318ae9f5f23b6..4b5fa0fe78fd36a3e4055328535ffa3283eae642 100644 (file)
@@ -170,7 +170,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
        struct netlbl_unlhsh_iface *iface;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -184,7 +184,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
                netlbl_af4list_remove_entry(iter4);
                kfree(netlbl_unlhsh_addr4_entry(iter4));
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) {
                netlbl_af6list_remove_entry(iter6);
                kfree(netlbl_unlhsh_addr6_entry(iter6));
@@ -274,7 +274,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
        return ret_val;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
  * @iface: the associated interface entry
@@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
        if (entry == NULL)
                return -ENOMEM;
 
-       ipv6_addr_copy(&entry->list.addr, addr);
+       entry->list.addr = *addr;
        entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
        entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
        entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
        entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-       ipv6_addr_copy(&entry->list.mask, mask);
+       entry->list.mask = *mask;
        entry->list.valid = 1;
        entry->secid = secid;
 
@@ -436,7 +436,7 @@ int netlbl_unlhsh_add(struct net *net,
                                                  mask4->s_addr);
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr): {
                const struct in6_addr *addr6 = addr;
                const struct in6_addr *mask6 = mask;
@@ -531,7 +531,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
  * @net: network namespace
@@ -606,14 +606,14 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
 {
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
 
        spin_lock(&netlbl_unlhsh_lock);
        netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list)
                goto unlhsh_condremove_failure;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list)
                goto unlhsh_condremove_failure;
 #endif /* IPv6 */
@@ -680,7 +680,7 @@ int netlbl_unlhsh_remove(struct net *net,
                                                     iface, addr, mask,
                                                     audit_info);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr):
                ret_val = netlbl_unlhsh_remove_addr6(net,
                                                     iface, addr, mask,
@@ -1196,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
        struct netlbl_unlhsh_iface *iface;
        struct list_head *iter_list;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *addr6;
 #endif
 
@@ -1228,7 +1228,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
                                        goto unlabel_staticlist_return;
                                }
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(addr6,
                                                   &iface->addr6_list) {
                                if (iter_addr6++ < skip_addr6)
@@ -1277,7 +1277,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
        u32 skip_addr6 = cb->args[1];
        u32 iter_addr4 = 0;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u32 iter_addr6 = 0;
        struct netlbl_af6list *addr6;
 #endif
@@ -1303,7 +1303,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
                        goto unlabel_staticlistdef_return;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
                if (iter_addr6++ < skip_addr6)
                        continue;
@@ -1494,7 +1494,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
                secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6: {
                struct ipv6hdr *hdr6;
                struct netlbl_af6list *addr6;
index 1201b6d4183d8993b1175119978a3baee4481eec..629b06182f3faab89ec771df253575ed467b6a2a 100644 (file)
@@ -139,12 +139,12 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
-static u32 netlink_group_mask(u32 group)
+static inline u32 netlink_group_mask(u32 group)
 {
        return group ? 1 << (group - 1) : 0;
 }
 
-static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
 {
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
@@ -226,8 +226,7 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
-static inline struct sock *netlink_lookup(struct net *net, int protocol,
-                                         u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
 {
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
@@ -248,7 +247,7 @@ found:
        return sk;
 }
 
-static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_pid_hash_zalloc(size_t size)
 {
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
@@ -258,7 +257,7 @@ static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
                                         get_order(size));
 }
 
-static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_pid_hash_free(struct hlist_head *table, size_t size)
 {
        if (size <= PAGE_SIZE)
                kfree(table);
@@ -578,7 +577,7 @@ retry:
        return err;
 }
 
-static inline int netlink_capable(struct socket *sock, unsigned int flag)
+static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
@@ -846,8 +845,7 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
        sock_put(sk);
 }
 
-static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
-                                          gfp_t allocation)
+static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 {
        int delta;
 
@@ -871,7 +869,7 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
        return skb;
 }
 
-static inline void netlink_rcv_wake(struct sock *sk)
+static void netlink_rcv_wake(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -881,7 +879,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
                wake_up_interruptible(&nlk->wait);
 }
 
-static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
 {
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -952,8 +950,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
 
-static inline int netlink_broadcast_deliver(struct sock *sk,
-                                           struct sk_buff *skb)
+static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -962,7 +959,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
-               return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
+               return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
        }
        return -1;
 }
@@ -982,7 +979,7 @@ struct netlink_broadcast_data {
        void *tx_data;
 };
 
-static inline int do_one_broadcast(struct sock *sk,
+static int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -1110,8 +1107,7 @@ struct netlink_set_err_data {
        int code;
 };
 
-static inline int do_one_set_err(struct sock *sk,
-                                struct netlink_set_err_data *p)
+static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        int ret = 0;
index 482fa571b4eecb9b89bc67ac523f0ff6c08ef0f5..a403b618faa5b80e0ef06abb4e5dc648874f7d34 100644 (file)
@@ -33,6 +33,14 @@ void genl_unlock(void)
 }
 EXPORT_SYMBOL(genl_unlock);
 
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_genl_is_held(void)
+{
+       return lockdep_is_held(&genl_mutex);
+}
+EXPORT_SYMBOL(lockdep_genl_is_held);
+#endif
+
 #define GENL_FAM_TAB_SIZE      16
 #define GENL_FAM_TAB_MASK      (GENL_FAM_TAB_SIZE - 1)
 
@@ -98,7 +106,7 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
 /* Of course we are going to have problems once we hit
  * 2^16 alive types, but that can only happen by year 2K
 */
-static inline u16 genl_generate_id(void)
+static u16 genl_generate_id(void)
 {
        static u16 id_gen_idx = GENL_MIN_ID;
        int i;
@@ -784,6 +792,15 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 
                name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
                res = genl_family_find_byname(name);
+#ifdef CONFIG_MODULES
+               if (res == NULL) {
+                       genl_unlock();
+                       request_module("net-pf-%d-proto-%d-type-%s",
+                                      PF_NETLINK, NETLINK_GENERIC, name);
+                       genl_lock();
+                       res = genl_family_find_byname(name);
+               }
+#endif
                err = -ENOENT;
        }
 
@@ -946,3 +963,16 @@ int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
        return genlmsg_mcast(skb, pid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+                struct nlmsghdr *nlh, gfp_t flags)
+{
+       struct sock *sk = net->genl_sock;
+       int report = 0;
+
+       if (nlh)
+               report = nlmsg_report(nlh);
+
+       nlmsg_notify(sk, skb, pid, group, report, flags);
+}
+EXPORT_SYMBOL(genl_notify);
index 732152f718e0dceb36826e7c4f36935c20ef603b..7dab229bfbcca3c67aa6112ec4a19a40c411adcb 100644 (file)
@@ -306,26 +306,26 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
 {
        struct sock *sk = sock->sk;
        struct nr_sock *nr = nr_sk(sk);
-       int opt;
+       unsigned long opt;
 
        if (level != SOL_NETROM)
                return -ENOPROTOOPT;
 
-       if (optlen < sizeof(int))
+       if (optlen < sizeof(unsigned int))
                return -EINVAL;
 
-       if (get_user(opt, (int __user *)optval))
+       if (get_user(opt, (unsigned int __user *)optval))
                return -EFAULT;
 
        switch (optname) {
        case NETROM_T1:
-               if (opt < 1)
+               if (opt < 1 || opt > ULONG_MAX / HZ)
                        return -EINVAL;
                nr->t1 = opt * HZ;
                return 0;
 
        case NETROM_T2:
-               if (opt < 1)
+               if (opt < 1 || opt > ULONG_MAX / HZ)
                        return -EINVAL;
                nr->t2 = opt * HZ;
                return 0;
@@ -337,13 +337,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
                return 0;
 
        case NETROM_T4:
-               if (opt < 1)
+               if (opt < 1 || opt > ULONG_MAX / HZ)
                        return -EINVAL;
                nr->t4 = opt * HZ;
                return 0;
 
        case NETROM_IDLE:
-               if (opt < 0)
+               if (opt > ULONG_MAX / (60 * HZ))
                        return -EINVAL;
                nr->idle = opt * 60 * HZ;
                return 0;
@@ -1244,7 +1244,8 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case SIOCADDRT:
        case SIOCDELRT:
        case SIOCNRDECOBS:
-               if (!capable(CAP_NET_ADMIN)) return -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                return nr_rt_ioctl(cmd, argp);
 
        default:
index 915a87ba23e188faa31861aac9a38d3b2f180827..2cf330162d7e8e21ddcbc21eb741336e332ec422 100644 (file)
@@ -670,14 +670,17 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
        case SIOCADDRT:
                if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
                        return -EFAULT;
-               if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
+               if (nr_route.ndigis > AX25_MAX_DIGIS)
                        return -EINVAL;
-               if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
-                       dev_put(dev);
+               if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
                        return -EINVAL;
-               }
                switch (nr_route.type) {
                case NETROM_NODE:
+                       if (strnlen(nr_route.mnemonic, 7) == 7) {
+                               ret = -EINVAL;
+                               break;
+                       }
+
                        ret = nr_add_node(&nr_route.callsign,
                                nr_route.mnemonic,
                                &nr_route.neighbour,
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
new file mode 100644 (file)
index 0000000..d9ea33c
--- /dev/null
@@ -0,0 +1,28 @@
+#
+# Open vSwitch
+#
+
+config OPENVSWITCH
+       tristate "Open vSwitch"
+       ---help---
+         Open vSwitch is a multilayer Ethernet switch targeted at virtualized
+         environments.  In addition to supporting a variety of features
+         expected in a traditional hardware switch, it enables fine-grained
+         programmatic extension and flow-based control of the network.  This
+         control is useful in a wide variety of applications but is
+         particularly important in multi-server virtualization deployments,
+         which are often characterized by highly dynamic endpoints and the
+         need to maintain logical abstractions for multiple tenants.
+
+         The Open vSwitch datapath provides an in-kernel fast path for packet
+         forwarding.  It is complemented by a userspace daemon, ovs-vswitchd,
+         which is able to accept configuration from a variety of sources and
+         translate it into packet processing rules.
+
+         See http://openvswitch.org for more information and userspace
+         utilities.
+
+         To compile this code as a module, choose M here: the module will be
+         called openvswitch.
+
+         If unsure, say N.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
new file mode 100644 (file)
index 0000000..15e7384
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for Open vSwitch.
+#
+
+obj-$(CONFIG_OPENVSWITCH) += openvswitch.o
+
+openvswitch-y := \
+       actions.o \
+       datapath.o \
+       dp_notify.o \
+       flow.o \
+       vport.o \
+       vport-internal_dev.o \
+       vport-netdev.o \
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
new file mode 100644 (file)
index 0000000..2725d1b
--- /dev/null
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/openvswitch.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in6.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+                       const struct nlattr *attr, int len, bool keep_skb);
+
+static int make_writable(struct sk_buff *skb, int write_len)
+{
+       if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+               return 0;
+
+       return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/* remove VLAN header from packet and update csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+{
+       struct vlan_hdr *vhdr;
+       int err;
+
+       err = make_writable(skb, VLAN_ETH_HLEN);
+       if (unlikely(err))
+               return err;
+
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+                                       + ETH_HLEN, VLAN_HLEN, 0));
+
+       vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+       *current_tci = vhdr->h_vlan_TCI;
+
+       memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+       __skb_pull(skb, VLAN_HLEN);
+
+       vlan_set_encap_proto(skb, vhdr);
+       skb->mac_header += VLAN_HLEN;
+       skb_reset_mac_len(skb);
+
+       return 0;
+}
+
+static int pop_vlan(struct sk_buff *skb)
+{
+       __be16 tci;
+       int err;
+
+       if (likely(vlan_tx_tag_present(skb))) {
+               skb->vlan_tci = 0;
+       } else {
+               if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+                            skb->len < VLAN_ETH_HLEN))
+                       return 0;
+
+               err = __pop_vlan_tci(skb, &tci);
+               if (err)
+                       return err;
+       }
+       /* move next vlan tag to hw accel tag */
+       if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+                  skb->len < VLAN_ETH_HLEN))
+               return 0;
+
+       err = __pop_vlan_tci(skb, &tci);
+       if (unlikely(err))
+               return err;
+
+       __vlan_hwaccel_put_tag(skb, ntohs(tci));
+       return 0;
+}
+
+static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+{
+       if (unlikely(vlan_tx_tag_present(skb))) {
+               u16 current_tag;
+
+               /* push down current VLAN tag */
+               current_tag = vlan_tx_tag_get(skb);
+
+               if (!__vlan_put_tag(skb, current_tag))
+                       return -ENOMEM;
+
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->csum = csum_add(skb->csum, csum_partial(skb->data
+                                       + ETH_HLEN, VLAN_HLEN, 0));
+
+       }
+       __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+       return 0;
+}
+
+static int set_eth_addr(struct sk_buff *skb,
+                       const struct ovs_key_ethernet *eth_key)
+{
+       int err;
+       err = make_writable(skb, ETH_HLEN);
+       if (unlikely(err))
+               return err;
+
+       memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
+       memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+
+       return 0;
+}
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+                               __be32 *addr, __be32 new_addr)
+{
+       int transport_len = skb->len - skb_transport_offset(skb);
+
+       if (nh->protocol == IPPROTO_TCP) {
+               if (likely(transport_len >= sizeof(struct tcphdr)))
+                       inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+                                                *addr, new_addr, 1);
+       } else if (nh->protocol == IPPROTO_UDP) {
+               if (likely(transport_len >= sizeof(struct udphdr)))
+                       inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
+                                                *addr, new_addr, 1);
+       }
+
+       csum_replace4(&nh->check, *addr, new_addr);
+       skb->rxhash = 0;
+       *addr = new_addr;
+}
+
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
+{
+       csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+       nh->ttl = new_ttl;
+}
+
+static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+{
+       struct iphdr *nh;
+       int err;
+
+       err = make_writable(skb, skb_network_offset(skb) +
+                                sizeof(struct iphdr));
+       if (unlikely(err))
+               return err;
+
+       nh = ip_hdr(skb);
+
+       if (ipv4_key->ipv4_src != nh->saddr)
+               set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+
+       if (ipv4_key->ipv4_dst != nh->daddr)
+               set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+
+       if (ipv4_key->ipv4_tos != nh->tos)
+               ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+
+       if (ipv4_key->ipv4_ttl != nh->ttl)
+               set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+
+       return 0;
+}
+
+/* Must follow make_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+                        __be16 new_port, __sum16 *check)
+{
+       inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+       *port = new_port;
+       skb->rxhash = 0;
+}
+
+static int set_udp_port(struct sk_buff *skb,
+                       const struct ovs_key_udp *udp_port_key)
+{
+       struct udphdr *uh;
+       int err;
+
+       err = make_writable(skb, skb_transport_offset(skb) +
+                                sizeof(struct udphdr));
+       if (unlikely(err))
+               return err;
+
+       uh = udp_hdr(skb);
+       if (udp_port_key->udp_src != uh->source)
+               set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+
+       if (udp_port_key->udp_dst != uh->dest)
+               set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+
+       return 0;
+}
+
+static int set_tcp_port(struct sk_buff *skb,
+                       const struct ovs_key_tcp *tcp_port_key)
+{
+       struct tcphdr *th;
+       int err;
+
+       err = make_writable(skb, skb_transport_offset(skb) +
+                                sizeof(struct tcphdr));
+       if (unlikely(err))
+               return err;
+
+       th = tcp_hdr(skb);
+       if (tcp_port_key->tcp_src != th->source)
+               set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+
+       if (tcp_port_key->tcp_dst != th->dest)
+               set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+
+       return 0;
+}
+
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+       struct vport *vport;
+
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       vport = rcu_dereference(dp->ports[out_port]);
+       if (unlikely(!vport)) {
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       ovs_vport_send(vport, skb);
+       return 0;
+}
+
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+                           const struct nlattr *attr)
+{
+       struct dp_upcall_info upcall;
+       const struct nlattr *a;
+       int rem;
+
+       upcall.cmd = OVS_PACKET_CMD_ACTION;
+       upcall.key = &OVS_CB(skb)->flow->key;
+       upcall.userdata = NULL;
+       upcall.pid = 0;
+
+       for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+                a = nla_next(a, &rem)) {
+               switch (nla_type(a)) {
+               case OVS_USERSPACE_ATTR_USERDATA:
+                       upcall.userdata = a;
+                       break;
+
+               case OVS_USERSPACE_ATTR_PID:
+                       upcall.pid = nla_get_u32(a);
+                       break;
+               }
+       }
+
+       return ovs_dp_upcall(dp, skb, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+                 const struct nlattr *attr)
+{
+       const struct nlattr *acts_list = NULL;
+       const struct nlattr *a;
+       int rem;
+
+       for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+                a = nla_next(a, &rem)) {
+               switch (nla_type(a)) {
+               case OVS_SAMPLE_ATTR_PROBABILITY:
+                       if (net_random() >= nla_get_u32(a))
+                               return 0;
+                       break;
+
+               case OVS_SAMPLE_ATTR_ACTIONS:
+                       acts_list = a;
+                       break;
+               }
+       }
+
+       return do_execute_actions(dp, skb, nla_data(acts_list),
+                                                nla_len(acts_list), true);
+}
+
+static int execute_set_action(struct sk_buff *skb,
+                                const struct nlattr *nested_attr)
+{
+       int err = 0;
+
+       switch (nla_type(nested_attr)) {
+       case OVS_KEY_ATTR_PRIORITY:
+               skb->priority = nla_get_u32(nested_attr);
+               break;
+
+       case OVS_KEY_ATTR_ETHERNET:
+               err = set_eth_addr(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_IPV4:
+               err = set_ipv4(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_TCP:
+               err = set_tcp_port(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_UDP:
+               err = set_udp_port(skb, nla_data(nested_attr));
+               break;
+       }
+
+       return err;
+}
+
+/* Execute a list of actions against 'skb'. */
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+                       const struct nlattr *attr, int len, bool keep_skb)
+{
+       /* Every output action needs a separate clone of 'skb', but the common
+        * case is just a single output action, so that doing a clone and
+        * then freeing the original skbuff is wasteful.  So the following code
+        * is slightly obscure just to avoid that. */
+       int prev_port = -1;
+       const struct nlattr *a;
+       int rem;
+
+       for (a = attr, rem = len; rem > 0;
+            a = nla_next(a, &rem)) {
+               int err = 0;
+
+               if (prev_port != -1) {
+                       do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+                       prev_port = -1;
+               }
+
+               switch (nla_type(a)) {
+               case OVS_ACTION_ATTR_OUTPUT:
+                       prev_port = nla_get_u32(a);
+                       break;
+
+               case OVS_ACTION_ATTR_USERSPACE:
+                       output_userspace(dp, skb, a);
+                       break;
+
+               case OVS_ACTION_ATTR_PUSH_VLAN:
+                       err = push_vlan(skb, nla_data(a));
+                       if (unlikely(err)) /* skb already freed. */
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_POP_VLAN:
+                       err = pop_vlan(skb);
+                       break;
+
+               case OVS_ACTION_ATTR_SET:
+                       err = execute_set_action(skb, nla_data(a));
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       err = sample(dp, skb, a);
+                       break;
+               }
+
+               if (unlikely(err)) {
+                       kfree_skb(skb);
+                       return err;
+               }
+       }
+
+       if (prev_port != -1) {
+               if (keep_skb)
+                       skb = skb_clone(skb, GFP_ATOMIC);
+
+               do_output(dp, skb, prev_port);
+       } else if (!keep_skb)
+               consume_skb(skb);
+
+       return 0;
+}
+
+/* Execute a list of actions against 'skb'. */
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
+{
+       struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+       return do_execute_actions(dp, skb, acts->actions,
+                                        acts->actions_len, false);
+}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
new file mode 100644 (file)
index 0000000..9a27251
--- /dev/null
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/etherdevice.h>
+#include <linux/genetlink.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/wait.h>
+#include <asm/system.h>
+#include <asm/div64.h>
+#include <linux/highmem.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/inetdevice.h>
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/rculist.h>
+#include <linux/dmi.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "flow.h"
+#include "vport-internal_dev.h"
+
+/**
+ * DOC: Locking:
+ *
+ * Writes to device state (add/remove datapath, port, set operations on vports,
+ * etc.) are protected by RTNL.
+ *
+ * Writes to other state (flow table modifications, set miscellaneous datapath
+ * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
+ * genl_mutex.
+ *
+ * Reads are protected by RCU.
+ *
+ * There are a few special cases (mostly stats) that have their own
+ * synchronization but they nest under all of above and don't interact with
+ * each other.
+ */
+
+/* Global list of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+static LIST_HEAD(dps);
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+static struct vport *new_vport(const struct vport_parms *);
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+                            const struct dp_upcall_info *);
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+                                 const struct dp_upcall_info *);
+
+/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+static struct datapath *get_dp(int dp_ifindex)
+{
+       struct datapath *dp = NULL;
+       struct net_device *dev;
+
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+       if (dev) {
+               struct vport *vport = ovs_internal_dev_get_vport(dev);
+               if (vport)
+                       dp = vport->dp;
+       }
+       rcu_read_unlock();
+
+       return dp;
+}
+
+/* Must be called with rcu_read_lock or RTNL lock. */
+const char *ovs_dp_name(const struct datapath *dp)
+{
+       struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+       return vport->ops->get_name(vport);
+}
+
+static int get_dpifindex(struct datapath *dp)
+{
+       struct vport *local;
+       int ifindex;
+
+       rcu_read_lock();
+
+       local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+       if (local)
+               ifindex = local->ops->get_ifindex(local);
+       else
+               ifindex = 0;
+
+       rcu_read_unlock();
+
+       return ifindex;
+}
+
+static void destroy_dp_rcu(struct rcu_head *rcu)
+{
+       struct datapath *dp = container_of(rcu, struct datapath, rcu);
+
+       ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+       free_percpu(dp->stats_percpu);
+       kfree(dp);
+}
+
+/* Called with RTNL lock and genl_lock. */
+static struct vport *new_vport(const struct vport_parms *parms)
+{
+       struct vport *vport;
+
+       vport = ovs_vport_add(parms);
+       if (!IS_ERR(vport)) {
+               struct datapath *dp = parms->dp;
+
+               rcu_assign_pointer(dp->ports[parms->port_no], vport);
+               list_add(&vport->node, &dp->port_list);
+       }
+
+       return vport;
+}
+
+/* Called with RTNL lock. */
+void ovs_dp_detach_port(struct vport *p)
+{
+       ASSERT_RTNL();
+
+       /* First drop references to device. */
+       list_del(&p->node);
+       rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+
+       /* Then destroy it. */
+       ovs_vport_del(p);
+}
+
+/* Must be called with rcu_read_lock. */
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+{
+       struct datapath *dp = p->dp;
+       struct sw_flow *flow;
+       struct dp_stats_percpu *stats;
+       struct sw_flow_key key;
+       u64 *stats_counter;
+       int error;
+       int key_len;
+
+       stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+       /* Extract flow from 'skb' into 'key'. */
+       error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+       if (unlikely(error)) {
+               kfree_skb(skb);
+               return;
+       }
+
+       /* Look up flow. */
+       flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+       if (unlikely(!flow)) {
+               struct dp_upcall_info upcall;
+
+               upcall.cmd = OVS_PACKET_CMD_MISS;
+               upcall.key = &key;
+               upcall.userdata = NULL;
+               upcall.pid = p->upcall_pid;
+               ovs_dp_upcall(dp, skb, &upcall);
+               consume_skb(skb);
+               stats_counter = &stats->n_missed;
+               goto out;
+       }
+
+       OVS_CB(skb)->flow = flow;
+
+       stats_counter = &stats->n_hit;
+       ovs_flow_used(OVS_CB(skb)->flow, skb);
+       ovs_execute_actions(dp, skb);
+
+out:
+       /* Update datapath statistics. */
+       u64_stats_update_begin(&stats->sync);
+       (*stats_counter)++;
+       u64_stats_update_end(&stats->sync);
+}
+
+static struct genl_family dp_packet_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_PACKET_FAMILY,
+       .version = OVS_PACKET_VERSION,
+       .maxattr = OVS_PACKET_ATTR_MAX
+};
+
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+             const struct dp_upcall_info *upcall_info)
+{
+       struct dp_stats_percpu *stats;
+       int dp_ifindex;
+       int err;
+
+       if (upcall_info->pid == 0) {
+               err = -ENOTCONN;
+               goto err;
+       }
+
+       dp_ifindex = get_dpifindex(dp);
+       if (!dp_ifindex) {
+               err = -ENODEV;
+               goto err;
+       }
+
+       if (!skb_is_gso(skb))
+               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+       else
+               err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+       if (err)
+               goto err;
+
+       return 0;
+
+err:
+       stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+       u64_stats_update_begin(&stats->sync);
+       stats->n_lost++;
+       u64_stats_update_end(&stats->sync);
+
+       return err;
+}
+
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+                            const struct dp_upcall_info *upcall_info)
+{
+       struct dp_upcall_info later_info;
+       struct sw_flow_key later_key;
+       struct sk_buff *segs, *nskb;
+       int err;
+
+       segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       /* Queue all of the segments. */
+       skb = segs;
+       do {
+               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               if (err)
+                       break;
+
+               if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+                       /* The initial flow key extracted by ovs_flow_extract()
+                        * in this case is for a first fragment, so we need to
+                        * properly mark later fragments.
+                        */
+                       later_key = *upcall_info->key;
+                       later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+
+                       later_info = *upcall_info;
+                       later_info.key = &later_key;
+                       upcall_info = &later_info;
+               }
+       } while ((skb = skb->next));
+
+       /* Free all of the segments. */
+       skb = segs;
+       do {
+               nskb = skb->next;
+               if (err)
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
+       } while ((skb = nskb));
+       return err;
+}
+
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+                                 const struct dp_upcall_info *upcall_info)
+{
+       struct ovs_header *upcall;
+       struct sk_buff *nskb = NULL;
+       struct sk_buff *user_skb; /* to be queued to userspace */
+       struct nlattr *nla;
+       unsigned int len;
+       int err;
+
+       if (vlan_tx_tag_present(skb)) {
+               nskb = skb_clone(skb, GFP_ATOMIC);
+               if (!nskb)
+                       return -ENOMEM;
+
+               nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+               if (!skb)
+                       return -ENOMEM;
+
+               nskb->vlan_tci = 0;
+               skb = nskb;
+       }
+
+       if (nla_attr_size(skb->len) > USHRT_MAX) {
+               err = -EFBIG;
+               goto out;
+       }
+
+       len = sizeof(struct ovs_header);
+       len += nla_total_size(skb->len);
+       len += nla_total_size(FLOW_BUFSIZE);
+       if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
+               len += nla_total_size(8);
+
+       user_skb = genlmsg_new(len, GFP_ATOMIC);
+       if (!user_skb) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
+                            0, upcall_info->cmd);
+       upcall->dp_ifindex = dp_ifindex;
+
+       nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
+       ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+       nla_nest_end(user_skb, nla);
+
+       if (upcall_info->userdata)
+               nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
+                           nla_get_u64(upcall_info->userdata));
+
+       nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+
+       skb_copy_and_csum_dev(skb, nla_data(nla));
+
+       err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+
+out:
+       kfree_skb(nskb);
+       return err;
+}
+
+/* Called with genl_mutex. */
+static int flush_flows(int dp_ifindex)
+{
+       struct flow_table *old_table;
+       struct flow_table *new_table;
+       struct datapath *dp;
+
+       dp = get_dp(dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       old_table = genl_dereference(dp->table);
+       new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
+       if (!new_table)
+               return -ENOMEM;
+
+       rcu_assign_pointer(dp->table, new_table);
+
+       ovs_flow_tbl_deferred_destroy(old_table);
+       return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+                               const struct sw_flow_key *key, int depth);
+
+static int validate_sample(const struct nlattr *attr,
+                               const struct sw_flow_key *key, int depth)
+{
+       const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+       const struct nlattr *probability, *actions;
+       const struct nlattr *a;
+       int rem;
+
+       memset(attrs, 0, sizeof(attrs));
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+               if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+                       return -EINVAL;
+               attrs[type] = a;
+       }
+       if (rem)
+               return -EINVAL;
+
+       probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+       if (!probability || nla_len(probability) != sizeof(u32))
+               return -EINVAL;
+
+       actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+       if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+               return -EINVAL;
+       return validate_actions(actions, key, depth + 1);
+}
+
+static int validate_set(const struct nlattr *a,
+                       const struct sw_flow_key *flow_key)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       int key_type = nla_type(ovs_key);
+
+       /* There can be only one key in an action */
+       if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+               return -EINVAL;
+
+       if (key_type > OVS_KEY_ATTR_MAX ||
+           nla_len(ovs_key) != ovs_key_lens[key_type])
+               return -EINVAL;
+
+       switch (key_type) {
+       const struct ovs_key_ipv4 *ipv4_key;
+
+       case OVS_KEY_ATTR_PRIORITY:
+       case OVS_KEY_ATTR_ETHERNET:
+               break;
+
+       case OVS_KEY_ATTR_IPV4:
+               if (flow_key->eth.type != htons(ETH_P_IP))
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+                       return -EINVAL;
+
+               ipv4_key = nla_data(ovs_key);
+               if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+                       return -EINVAL;
+
+               if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+                       return -EINVAL;
+
+               break;
+
+       case OVS_KEY_ATTR_TCP:
+               if (flow_key->ip.proto != IPPROTO_TCP)
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+                       return -EINVAL;
+
+               break;
+
+       case OVS_KEY_ATTR_UDP:
+               if (flow_key->ip.proto != IPPROTO_UDP)
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+                       return -EINVAL;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+       static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =   {
+               [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+               [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+       };
+       struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+       int error;
+
+       error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+                                attr, userspace_policy);
+       if (error)
+               return error;
+
+       if (!a[OVS_USERSPACE_ATTR_PID] ||
+           !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+                               const struct sw_flow_key *key,  int depth)
+{
+       const struct nlattr *a;
+       int rem, err;
+
+       if (depth >= SAMPLE_ACTION_DEPTH)
+               return -EOVERFLOW;
+
+       nla_for_each_nested(a, attr, rem) {
+               /* Expected argument lengths, (u32)-1 for variable length. */
+               static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+                       [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+                       [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+                       [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+                       [OVS_ACTION_ATTR_POP_VLAN] = 0,
+                       [OVS_ACTION_ATTR_SET] = (u32)-1,
+                       [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+               };
+               const struct ovs_action_push_vlan *vlan;
+               int type = nla_type(a);
+
+               if (type > OVS_ACTION_ATTR_MAX ||
+                   (action_lens[type] != nla_len(a) &&
+                    action_lens[type] != (u32)-1))
+                       return -EINVAL;
+
+               switch (type) {
+               case OVS_ACTION_ATTR_UNSPEC:
+                       return -EINVAL;
+
+               case OVS_ACTION_ATTR_USERSPACE:
+                       err = validate_userspace(a);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_OUTPUT:
+                       if (nla_get_u32(a) >= DP_MAX_PORTS)
+                               return -EINVAL;
+                       break;
+
+
+               case OVS_ACTION_ATTR_POP_VLAN:
+                       break;
+
+               case OVS_ACTION_ATTR_PUSH_VLAN:
+                       vlan = nla_data(a);
+                       if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+                               return -EINVAL;
+                       if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+                               return -EINVAL;
+                       break;
+
+               case OVS_ACTION_ATTR_SET:
+                       err = validate_set(a, key);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       err = validate_sample(a, key, depth);
+                       if (err)
+                               return err;
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (rem > 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void clear_stats(struct sw_flow *flow)
+{
+       flow->used = 0;
+       flow->tcp_flags = 0;
+       flow->packet_count = 0;
+       flow->byte_count = 0;
+}
+
+/* OVS_PACKET_CMD_EXECUTE handler: inject a userspace-supplied frame
+ * into the datapath and run the supplied actions on it.
+ *
+ * Required attributes: OVS_PACKET_ATTR_PACKET (raw frame, at least
+ * ETH_HLEN bytes), OVS_PACKET_ATTR_KEY (flow metadata) and
+ * OVS_PACKET_ATTR_ACTIONS.  Returns 0 on success or a negative errno.
+ * The injected skb is consumed by ovs_execute_actions() on the success
+ * path; on error paths before that point it is freed here.
+ */
+static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
+{
+       struct ovs_header *ovs_header = info->userhdr;
+       struct nlattr **a = info->attrs;
+       struct sw_flow_actions *acts;
+       struct sk_buff *packet;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct ethhdr *eth;
+       int len;
+       int err;
+       int key_len;
+
+       err = -EINVAL;
+       if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
+           !a[OVS_PACKET_ATTR_ACTIONS] ||
+           nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
+               goto err;
+
+       /* Copy the frame out of the netlink attribute into a fresh skb. */
+       len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
+       packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
+       err = -ENOMEM;
+       if (!packet)
+               goto err;
+       skb_reserve(packet, NET_IP_ALIGN);
+
+       memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
+
+       skb_reset_mac_header(packet);
+       eth = eth_hdr(packet);
+
+       /* Normally, setting the skb 'protocol' field would be handled by a
+        * call to eth_type_trans(), but it assumes there's a sending
+        * device, which we may not have.  1536 (0x600) is the classic
+        * EtherType/802.3-length dividing line. */
+       if (ntohs(eth->h_proto) >= 1536)
+               packet->protocol = eth->h_proto;
+       else
+               packet->protocol = htons(ETH_P_802_2);
+
+       /* Build an sw_flow for sending this packet. */
+       flow = ovs_flow_alloc();
+       err = PTR_ERR(flow);
+       if (IS_ERR(flow))
+               goto err_kfree_skb;
+
+       /* in_port passed as -1: injected packets have no real ingress
+        * port — presumably overridden below from OVS_PACKET_ATTR_KEY;
+        * confirm against ovs_flow_extract(). */
+       err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+       if (err)
+               goto err_flow_free;
+
+       err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+                                            &flow->key.phy.in_port,
+                                            a[OVS_PACKET_ATTR_KEY]);
+       if (err)
+               goto err_flow_free;
+
+       err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
+       if (err)
+               goto err_flow_free;
+
+       flow->hash = ovs_flow_hash(&flow->key, key_len);
+
+       acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+       err = PTR_ERR(acts);
+       if (IS_ERR(acts))
+               goto err_flow_free;
+       rcu_assign_pointer(flow->sf_acts, acts);
+
+       OVS_CB(packet)->flow = flow;
+       packet->priority = flow->key.phy.priority;
+
+       rcu_read_lock();
+       dp = get_dp(ovs_header->dp_ifindex);
+       err = -ENODEV;
+       if (!dp)
+               goto err_unlock;
+
+       /* Actions normally run from receive (softirq) context; disable
+        * BHs so the execution environment matches. */
+       local_bh_disable();
+       err = ovs_execute_actions(dp, packet);
+       local_bh_enable();
+       rcu_read_unlock();
+
+       ovs_flow_free(flow);
+       return err;
+
+err_unlock:
+       rcu_read_unlock();
+err_flow_free:
+       ovs_flow_free(flow);
+err_kfree_skb:
+       kfree_skb(packet);
+err:
+       return err;
+}
+
+/* Netlink attribute policy for OVS_PACKET_* requests. */
+static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
+       [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+       [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
+
+/* Generic Netlink operations for the packet family (execute only). */
+static struct genl_ops dp_packet_genl_ops[] = {
+       { .cmd = OVS_PACKET_CMD_EXECUTE,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = packet_policy,
+         .doit = ovs_packet_cmd_execute
+       }
+};
+
+/* Fill 'stats' with the datapath's flow count plus hit/miss/lost
+ * counters summed over all possible CPUs.  Each per-CPU snapshot is
+ * taken under the u64_stats seqcount so 64-bit counters are read
+ * consistently on 32-bit hosts.
+ */
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+{
+       int i;
+       struct flow_table *table = genl_dereference(dp->table);
+
+       stats->n_flows = ovs_flow_tbl_count(table);
+
+       stats->n_hit = stats->n_missed = stats->n_lost = 0;
+       for_each_possible_cpu(i) {
+               const struct dp_stats_percpu *percpu_stats;
+               struct dp_stats_percpu local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+               /* Retry the copy if a writer updated the counters
+                * while we were reading them. */
+               do {
+                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+               stats->n_hit += local_stats.n_hit;
+               stats->n_missed += local_stats.n_missed;
+               stats->n_lost += local_stats.n_lost;
+       }
+}
+
+/* Netlink attribute policy for OVS_FLOW_* requests. */
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+       [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+/* Generic Netlink family for flow management; id assigned at
+ * registration time (GENL_ID_GENERATE). */
+static struct genl_family dp_flow_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_FLOW_FAMILY,
+       .version = OVS_FLOW_VERSION,
+       .maxattr = OVS_FLOW_ATTR_MAX
+};
+
+/* Multicast group used to broadcast flow change notifications. */
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+       .name = OVS_FLOW_MCGROUP
+};
+
+/* Called with genl_lock. */
+/* Serialize one flow (key, stats, TCP flags, last-used time, actions)
+ * into 'skb' as a Generic Netlink message.  Returns the genlmsg_end()
+ * result on success or a negative errno; on failure the partial
+ * message is cancelled.
+ */
+static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+                                 struct sk_buff *skb, u32 pid,
+                                 u32 seq, u32 flags, u8 cmd)
+{
+       const int skb_orig_len = skb->len;
+       const struct sw_flow_actions *sf_acts;
+       struct ovs_flow_stats stats;
+       struct ovs_header *ovs_header;
+       struct nlattr *nla;
+       unsigned long used;
+       u8 tcp_flags;
+       int err;
+
+       /* Safe without RCU read lock because genl_lock is held. */
+       sf_acts = rcu_dereference_protected(flow->sf_acts,
+                                           lockdep_genl_is_held());
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+       if (!ovs_header)
+               return -EMSGSIZE;
+
+       ovs_header->dp_ifindex = get_dpifindex(dp);
+
+       nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
+       if (!nla)
+               goto nla_put_failure;
+       err = ovs_flow_to_nlattrs(&flow->key, skb);
+       if (err)
+               goto error;
+       nla_nest_end(skb, nla);
+
+       /* Snapshot the stats under the flow lock so the values are
+        * mutually consistent. */
+       spin_lock_bh(&flow->lock);
+       used = flow->used;
+       stats.n_packets = flow->packet_count;
+       stats.n_bytes = flow->byte_count;
+       tcp_flags = flow->tcp_flags;
+       spin_unlock_bh(&flow->lock);
+
+       if (used)
+               NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+
+       if (stats.n_packets)
+               NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+                       sizeof(struct ovs_flow_stats), &stats);
+
+       if (tcp_flags)
+               NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+       /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
+        * this is the first flow to be dumped into 'skb'.  This is unusual for
+        * Netlink but individual action lists can be longer than
+        * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
+        * The userspace caller can always fetch the actions separately if it
+        * really wants them.  (Most userspace callers in fact don't care.)
+        *
+        * This can only fail for dump operations because the skb is always
+        * properly sized for single flows.
+        */
+       err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
+                     sf_acts->actions);
+       if (err < 0 && skb_orig_len)
+               goto error;
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+error:
+       genlmsg_cancel(skb, ovs_header);
+       return err;
+}
+
+/* Allocate a reply skb sized to hold one serialized flow: key,
+ * actions, stats, TCP flags and last-used time, plus the ovs_header.
+ * Returns NULL on allocation failure.  Requires genl_lock for the
+ * sf_acts dereference.
+ */
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+{
+       const struct sw_flow_actions *sf_acts;
+       int len;
+
+       sf_acts = rcu_dereference_protected(flow->sf_acts,
+                                           lockdep_genl_is_held());
+
+       /* OVS_FLOW_ATTR_KEY */
+       len = nla_total_size(FLOW_BUFSIZE);
+       /* OVS_FLOW_ATTR_ACTIONS */
+       len += nla_total_size(sf_acts->actions_len);
+       /* OVS_FLOW_ATTR_STATS */
+       len += nla_total_size(sizeof(struct ovs_flow_stats));
+       /* OVS_FLOW_ATTR_TCP_FLAGS */
+       len += nla_total_size(1);
+       /* OVS_FLOW_ATTR_USED */
+       len += nla_total_size(8);
+
+       len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+       return genlmsg_new(len, GFP_KERNEL);
+}
+
+/* Allocate and fill a single-flow reply message.  Returns the skb or
+ * ERR_PTR(-ENOMEM).  Filling cannot legitimately fail here because
+ * ovs_flow_cmd_alloc_info() sized the skb for exactly this flow,
+ * hence the BUG_ON.
+ */
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+                                              struct datapath *dp,
+                                              u32 pid, u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = ovs_flow_cmd_alloc_info(flow);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+       BUG_ON(retval < 0);
+       return skb;
+}
+
+/* Shared handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: create a
+ * flow, or update an existing one's actions and optionally clear its
+ * stats.  NEW fails with -EEXIST if the flow exists (subject to the
+ * NLM_F_CREATE/NLM_F_EXCL quirk noted below); SET fails with -ENOENT
+ * if it does not.  On success a notification is multicast; reply
+ * build failures are reported to listeners via netlink_set_err()
+ * rather than failing the operation.
+ */
+static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sw_flow *flow;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       struct flow_table *table;
+       int error;
+       int key_len;
+
+       /* Extract key. */
+       error = -EINVAL;
+       if (!a[OVS_FLOW_ATTR_KEY])
+               goto error;
+       error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (error)
+               goto error;
+
+       /* Validate actions.  NEW must supply them; SET may omit them to
+        * leave the existing actions unchanged. */
+       if (a[OVS_FLOW_ATTR_ACTIONS]) {
+               error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
+               if (error)
+                       goto error;
+       } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
+               error = -EINVAL;
+               goto error;
+       }
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       error = -ENODEV;
+       if (!dp)
+               goto error;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow) {
+               struct sw_flow_actions *acts;
+
+               /* Bail out if we're not allowed to create a new flow. */
+               error = -ENOENT;
+               if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
+                       goto error;
+
+               /* Expand table, if necessary, to make room.  Expansion
+                * failure is non-fatal: we just insert into the
+                * existing (fuller) table. */
+               if (ovs_flow_tbl_need_to_expand(table)) {
+                       struct flow_table *new_table;
+
+                       new_table = ovs_flow_tbl_expand(table);
+                       if (!IS_ERR(new_table)) {
+                               rcu_assign_pointer(dp->table, new_table);
+                               ovs_flow_tbl_deferred_destroy(table);
+                               table = genl_dereference(dp->table);
+                       }
+               }
+
+               /* Allocate flow. */
+               flow = ovs_flow_alloc();
+               if (IS_ERR(flow)) {
+                       error = PTR_ERR(flow);
+                       goto error;
+               }
+               flow->key = key;
+               clear_stats(flow);
+
+               /* Obtain actions. */
+               acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+               error = PTR_ERR(acts);
+               if (IS_ERR(acts))
+                       goto error_free_flow;
+               rcu_assign_pointer(flow->sf_acts, acts);
+
+               /* Put flow in bucket. */
+               flow->hash = ovs_flow_hash(&key, key_len);
+               ovs_flow_tbl_insert(table, flow);
+
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                               info->snd_seq,
+                                               OVS_FLOW_CMD_NEW);
+       } else {
+               /* We found a matching flow. */
+               struct sw_flow_actions *old_acts;
+               struct nlattr *acts_attrs;
+
+               /* Bail out if we're not allowed to modify an existing flow.
+                * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+                * because Generic Netlink treats the latter as a dump
+                * request.  We also accept NLM_F_EXCL in case that bug ever
+                * gets fixed.
+                */
+               error = -EEXIST;
+               if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
+                   info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+                       goto error;
+
+               /* Update actions, but only if the new action list actually
+                * differs from the current one. */
+               old_acts = rcu_dereference_protected(flow->sf_acts,
+                                                    lockdep_genl_is_held());
+               acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+               if (acts_attrs &&
+                  (old_acts->actions_len != nla_len(acts_attrs) ||
+                  memcmp(old_acts->actions, nla_data(acts_attrs),
+                         old_acts->actions_len))) {
+                       struct sw_flow_actions *new_acts;
+
+                       new_acts = ovs_flow_actions_alloc(acts_attrs);
+                       error = PTR_ERR(new_acts);
+                       if (IS_ERR(new_acts))
+                               goto error;
+
+                       /* Publish new actions; old ones freed after a
+                        * grace period since readers may still use them. */
+                       rcu_assign_pointer(flow->sf_acts, new_acts);
+                       ovs_flow_deferred_free_acts(old_acts);
+               }
+
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                              info->snd_seq, OVS_FLOW_CMD_NEW);
+
+               /* Clear stats. */
+               if (a[OVS_FLOW_ATTR_CLEAR]) {
+                       spin_lock_bh(&flow->lock);
+                       clear_stats(flow);
+                       spin_unlock_bh(&flow->lock);
+               }
+       }
+
+       if (!IS_ERR(reply))
+               genl_notify(reply, genl_info_net(info), info->snd_pid,
+                          ovs_dp_flow_multicast_group.id, info->nlhdr,
+                          GFP_KERNEL);
+       else
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
+       return 0;
+
+error_free_flow:
+       ovs_flow_free(flow);
+error:
+       return error;
+}
+
+/* OVS_FLOW_CMD_GET handler: look up one flow by exact key and send it
+ * back as a unicast reply.  Returns a negative errno on failure.
+ */
+static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sk_buff *reply;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct flow_table *table;
+       int err;
+       int key_len;
+
+       if (!a[OVS_FLOW_ATTR_KEY])
+               return -EINVAL;
+       err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (err)
+               return err;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow)
+               return -ENOENT;
+
+       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                       info->snd_seq, OVS_FLOW_CMD_NEW);
+       if (IS_ERR(reply))
+               return PTR_ERR(reply);
+
+       return genlmsg_reply(reply, info);
+}
+
+/* OVS_FLOW_CMD_DEL handler: delete one flow identified by key, or —
+ * when no key attribute is given — flush every flow in the datapath.
+ * The reply skb is allocated before the flow is removed from the
+ * table so that deletion cannot fail after the flow is gone.
+ */
+static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sk_buff *reply;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct flow_table *table;
+       int err;
+       int key_len;
+
+       /* No key means "delete everything". */
+       if (!a[OVS_FLOW_ATTR_KEY])
+               return flush_flows(ovs_header->dp_ifindex);
+       err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (err)
+               return err;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow)
+               return -ENOENT;
+
+       reply = ovs_flow_cmd_alloc_info(flow);
+       if (!reply)
+               return -ENOMEM;
+
+       ovs_flow_tbl_remove(table, flow);
+
+       /* Cannot fail: 'reply' was sized for exactly this flow. */
+       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+                                    info->snd_seq, 0, OVS_FLOW_CMD_DEL);
+       BUG_ON(err < 0);
+
+       /* RCU-deferred free: readers may still hold references. */
+       ovs_flow_deferred_free(flow);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+       return 0;
+}
+
+/* Netlink dump callback for flows.  Iteration position (hash bucket
+ * and object index) is persisted across dump calls in cb->args[0..1];
+ * we stop when the skb fills up and resume from the saved position on
+ * the next invocation.
+ */
+static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+       struct datapath *dp;
+       struct flow_table *table;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+
+       for (;;) {
+               struct sw_flow *flow;
+               u32 bucket, obj;
+
+               bucket = cb->args[0];
+               obj = cb->args[1];
+               flow = ovs_flow_tbl_next(table, &bucket, &obj);
+               if (!flow)
+                       break;
+
+               if (ovs_flow_cmd_fill_info(flow, dp, skb,
+                                          NETLINK_CB(cb->skb).pid,
+                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                          OVS_FLOW_CMD_NEW) < 0)
+                       break;
+
+               /* Only advance the cursor once the flow fit in 'skb'. */
+               cb->args[0] = bucket;
+               cb->args[1] = obj;
+       }
+       return skb->len;
+}
+
+/* Generic Netlink operations for the flow family.  NEW and SET share
+ * one handler; GET doubles as the dump entry point. */
+static struct genl_ops dp_flow_genl_ops[] = {
+       { .cmd = OVS_FLOW_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_new_or_set
+       },
+       { .cmd = OVS_FLOW_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_del
+       },
+       { .cmd = OVS_FLOW_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_get,
+         .dumpit = ovs_flow_cmd_dump
+       },
+       { .cmd = OVS_FLOW_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_new_or_set,
+       },
+};
+
+/* Netlink attribute policy for OVS_DP_* requests. */
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+       [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+};
+
+/* Generic Netlink family for datapath management. */
+static struct genl_family dp_datapath_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_DATAPATH_FAMILY,
+       .version = OVS_DATAPATH_VERSION,
+       .maxattr = OVS_DP_ATTR_MAX
+};
+
+/* Multicast group used to broadcast datapath change notifications. */
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+       .name = OVS_DATAPATH_MCGROUP
+};
+
+/* Serialize one datapath (name and aggregate stats) into 'skb' as a
+ * Generic Netlink message.  Returns the genlmsg_end() result on
+ * success or -EMSGSIZE; the partial message is cancelled on failure.
+ */
+static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
+                               u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+       struct ovs_header *ovs_header;
+       struct ovs_dp_stats dp_stats;
+       int err;
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+                                  flags, cmd);
+       if (!ovs_header)
+               goto error;
+
+       ovs_header->dp_ifindex = get_dpifindex(dp);
+
+       /* The datapath name comes from an RCU-protected vport lookup. */
+       rcu_read_lock();
+       err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
+       rcu_read_unlock();
+       if (err)
+               goto nla_put_failure;
+
+       get_dp_stats(dp, &dp_stats);
+       NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       genlmsg_cancel(skb, ovs_header);
+error:
+       return -EMSGSIZE;
+}
+
+/* Allocate and fill a single-datapath reply message.  Returns the skb
+ * or an ERR_PTR (the skb is freed on fill failure).
+ */
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+                                            u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+       if (retval < 0) {
+               kfree_skb(skb);
+               return ERR_PTR(retval);
+       }
+       return skb;
+}
+
+/* Called with genl_mutex and optionally with RTNL lock also. */
+/* Resolve a datapath either by OVS_DP_ATTR_NAME (via its local vport)
+ * or, failing that attribute, by the dp_ifindex in the ovs_header.
+ * Returns the datapath or ERR_PTR(-ENODEV).
+ */
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+                                       struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+{
+       struct datapath *dp;
+
+       if (!a[OVS_DP_ATTR_NAME])
+               dp = get_dp(ovs_header->dp_ifindex);
+       else {
+               struct vport *vport;
+
+               /* Only the OVSP_LOCAL vport carries the datapath's name. */
+               rcu_read_lock();
+               vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+               dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
+               rcu_read_unlock();
+       }
+       return dp ? dp : ERR_PTR(-ENODEV);
+}
+
+/* OVS_DP_CMD_NEW handler: create a datapath with the given name and
+ * upcall socket pid, including its flow table, per-CPU stats and the
+ * internal OVSP_LOCAL vport.  A module reference is held for the
+ * lifetime of the datapath (dropped in ovs_dp_cmd_del()).  On failure
+ * everything acquired so far is unwound via the goto ladder.
+ */
+static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct vport_parms parms;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       struct vport *vport;
+       int err;
+
+       err = -EINVAL;
+       if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
+               goto err;
+
+       rtnl_lock();
+       err = -ENODEV;
+       if (!try_module_get(THIS_MODULE))
+               goto err_unlock_rtnl;
+
+       err = -ENOMEM;
+       dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+       if (dp == NULL)
+               goto err_put_module;
+       INIT_LIST_HEAD(&dp->port_list);
+
+       /* Allocate table. */
+       err = -ENOMEM;
+       rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
+       if (!dp->table)
+               goto err_free_dp;
+
+       dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+       if (!dp->stats_percpu) {
+               err = -ENOMEM;
+               goto err_destroy_table;
+       }
+
+       /* Set up our datapath device. */
+       parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
+       parms.type = OVS_VPORT_TYPE_INTERNAL;
+       parms.options = NULL;
+       parms.dp = dp;
+       parms.port_no = OVSP_LOCAL;
+       parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+
+       vport = new_vport(&parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               /* -EBUSY from vport creation means the name is taken;
+                * report that to userspace as -EEXIST. */
+               if (err == -EBUSY)
+                       err = -EEXIST;
+
+               goto err_destroy_percpu;
+       }
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto err_destroy_local_port;
+
+       /* Publish the datapath on the global list only once it is
+        * fully constructed. */
+       list_add_tail(&dp->list_node, &dps);
+       rtnl_unlock();
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+       return 0;
+
+err_destroy_local_port:
+       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+err_destroy_percpu:
+       free_percpu(dp->stats_percpu);
+err_destroy_table:
+       ovs_flow_tbl_destroy(genl_dereference(dp->table));
+err_free_dp:
+       kfree(dp);
+err_put_module:
+       module_put(THIS_MODULE);
+err_unlock_rtnl:
+       rtnl_unlock();
+err:
+       return err;
+}
+
+/* OVS_DP_CMD_DEL handler: tear down a datapath.  Detaches every vport
+ * (the OVSP_LOCAL port last), unlinks the datapath from the global
+ * list, frees it after an RCU grace period, and drops the module
+ * reference taken at creation.  The reply is built before any
+ * destructive step so deletion cannot fail half-way.
+ */
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct vport *vport, *next_vport;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       rtnl_lock();
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       err = PTR_ERR(dp);
+       if (IS_ERR(dp))
+               goto exit_unlock;
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
+               if (vport->port_no != OVSP_LOCAL)
+                       ovs_dp_detach_port(vport);
+
+       list_del(&dp->list_node);
+       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+
+       /* rtnl_unlock() will wait until all the references to devices that
+        * are pending unregistration have been dropped.  We do it here to
+        * ensure that any internal devices (which contain DP pointers) are
+        * fully destroyed before freeing the datapath.
+        */
+       rtnl_unlock();
+
+       call_rcu(&dp->rcu, destroy_dp_rcu);
+       module_put(THIS_MODULE);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+
+       return 0;
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+/* OVS_DP_CMD_SET handler.  As written it only re-announces the
+ * datapath's current state to the multicast group — no attribute is
+ * actually applied here.  A reply-build failure is reported to
+ * listeners via netlink_set_err() and the command still returns 0.
+ */
+static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       if (IS_ERR(dp))
+               return PTR_ERR(dp);
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       if (IS_ERR(reply)) {
+               err = PTR_ERR(reply);
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_datapath_multicast_group.id, err);
+               return 0;
+       }
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+
+       return 0;
+}
+
+/* OVS_DP_CMD_GET handler: look up one datapath by name or ifindex and
+ * send its description back as a unicast reply.
+ */
+static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       if (IS_ERR(dp))
+               return PTR_ERR(dp);
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       if (IS_ERR(reply))
+               return PTR_ERR(reply);
+
+       return genlmsg_reply(reply, info);
+}
+
+/* Netlink dump callback for datapaths.  cb->args[0] stores how many
+ * entries have already been emitted; entries before that index are
+ * skipped on each resumed invocation.
+ */
+static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct datapath *dp;
+       int skip = cb->args[0];
+       int i = 0;
+
+       list_for_each_entry(dp, &dps, list_node) {
+               if (i < skip)
+                       continue;
+               if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                        OVS_DP_CMD_NEW) < 0)
+                       break;
+               i++;
+       }
+
+       cb->args[0] = i;
+
+       return skb->len;
+}
+
+/* Generic Netlink operations for the datapath family. */
+static struct genl_ops dp_datapath_genl_ops[] = {
+       { .cmd = OVS_DP_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_new
+       },
+       { .cmd = OVS_DP_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_del
+       },
+       { .cmd = OVS_DP_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_get,
+         .dumpit = ovs_dp_cmd_dump
+       },
+       { .cmd = OVS_DP_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_set,
+       },
+};
+
+/* Netlink attribute policy for OVS_VPORT_* requests. */
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+       [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+       [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+/* Generic Netlink family for vport management. */
+static struct genl_family dp_vport_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_VPORT_FAMILY,
+       .version = OVS_VPORT_VERSION,
+       .maxattr = OVS_VPORT_ATTR_MAX
+};
+
+/* Multicast group for vport notifications; non-static because other
+ * files reference it (no header change visible here). */
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+       .name = OVS_VPORT_MCGROUP
+};
+
+/* Called with RTNL lock or RCU read lock. */
+/* Serialize one vport (port number, type, name, upcall pid, stats and
+ * per-type options) into 'skb'.  Returns the genlmsg_end() result on
+ * success or a negative errno; the partial message is cancelled on
+ * failure.  Note that ovs_vport_get_options() errors other than
+ * -EMSGSIZE fall through to genlmsg_end() — presumably intentional
+ * best-effort; confirm against upstream history.
+ */
+static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+                                  u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+       struct ovs_header *ovs_header;
+       struct ovs_vport_stats vport_stats;
+       int err;
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+                                flags, cmd);
+       if (!ovs_header)
+               return -EMSGSIZE;
+
+       ovs_header->dp_ifindex = get_dpifindex(vport->dp);
+
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
+       NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+
+       ovs_vport_get_stats(vport, &vport_stats);
+       NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+               &vport_stats);
+
+       err = ovs_vport_get_options(vport, skb);
+       if (err == -EMSGSIZE)
+               goto error;
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+error:
+       genlmsg_cancel(skb, ovs_header);
+       return err;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+/* Allocate and fill a single-vport reply message.  Uses GFP_ATOMIC
+ * because callers may hold only the RCU read lock.  Returns the skb
+ * or an ERR_PTR (the skb is freed on fill failure).
+ */
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+                                        u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+       if (retval < 0) {
+               kfree_skb(skb);
+               return ERR_PTR(retval);
+       }
+       return skb;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+/* Resolve a vport from request attributes: by OVS_VPORT_ATTR_NAME
+ * (global lookup, ovs_header ignored) or by OVS_VPORT_ATTR_PORT_NO
+ * within the datapath named by ovs_header->dp_ifindex.  Exactly one
+ * of the two must be present; otherwise -EINVAL.
+ */
+static struct vport *lookup_vport(struct ovs_header *ovs_header,
+                                 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+{
+       struct datapath *dp;
+       struct vport *vport;
+
+       if (a[OVS_VPORT_ATTR_NAME]) {
+               vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+               if (!vport)
+                       return ERR_PTR(-ENODEV);
+               return vport;
+       } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
+               u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+               if (port_no >= DP_MAX_PORTS)
+                       return ERR_PTR(-EFBIG);
+
+               dp = get_dp(ovs_header->dp_ifindex);
+               if (!dp)
+                       return ERR_PTR(-ENODEV);
+
+               vport = rcu_dereference_rtnl(dp->ports[port_no]);
+               if (!vport)
+                       return ERR_PTR(-ENOENT);
+               return vport;
+       } else
+               return ERR_PTR(-EINVAL);
+}
+
+/* OVS_VPORT_CMD_NEW handler: attach a new vport to a datapath, either
+ * at a caller-chosen port number or at the first free one (port 0 is
+ * reserved for OVSP_LOCAL).  Requires NAME, TYPE and UPCALL_PID
+ * attributes.  If the notification reply cannot be built, the newly
+ * created port is detached again and the error is returned.
+ */
+static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct vport_parms parms;
+       struct sk_buff *reply;
+       struct vport *vport;
+       struct datapath *dp;
+       u32 port_no;
+       int err;
+
+       err = -EINVAL;
+       if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
+           !a[OVS_VPORT_ATTR_UPCALL_PID])
+               goto exit;
+
+       rtnl_lock();
+       dp = get_dp(ovs_header->dp_ifindex);
+       err = -ENODEV;
+       if (!dp)
+               goto exit_unlock;
+
+       if (a[OVS_VPORT_ATTR_PORT_NO]) {
+               /* Explicit port number: must be in range and free. */
+               port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+               err = -EFBIG;
+               if (port_no >= DP_MAX_PORTS)
+                       goto exit_unlock;
+
+               vport = rtnl_dereference(dp->ports[port_no]);
+               err = -EBUSY;
+               if (vport)
+                       goto exit_unlock;
+       } else {
+               /* No port number given: scan for the first free slot. */
+               for (port_no = 1; ; port_no++) {
+                       if (port_no >= DP_MAX_PORTS) {
+                               err = -EFBIG;
+                               goto exit_unlock;
+                       }
+                       vport = rtnl_dereference(dp->ports[port_no]);
+                       if (!vport)
+                               break;
+               }
+       }
+
+       parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
+       parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
+       parms.options = a[OVS_VPORT_ATTR_OPTIONS];
+       parms.dp = dp;
+       parms.port_no = port_no;
+       parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+       vport = new_vport(&parms);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       if (IS_ERR(reply)) {
+               /* Undo the port creation if we cannot announce it. */
+               err = PTR_ERR(reply);
+               ovs_dp_detach_port(vport);
+               goto exit_unlock;
+       }
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+exit:
+       return err;
+}
+
+/* Genetlink OVS_VPORT_CMD_SET handler: updates a vport's options and/or
+ * upcall PID (its type may not change) and multicasts a notification.
+ * Runs under RTNL.
+ */
+static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rtnl_lock();
+       vport = lookup_vport(info->userhdr, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       /* A port's type is fixed for its lifetime; reject attempts to
+        * change it. */
+       err = 0;
+       if (a[OVS_VPORT_ATTR_TYPE] &&
+           nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
+               err = -EINVAL;
+
+       if (!err && a[OVS_VPORT_ATTR_OPTIONS])
+               err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+       if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+               vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       if (IS_ERR(reply)) {
+               /* The change already took effect; we only failed to build
+                * the notification.  Report that to listeners and return
+                * success.  Fall through to exit_unlock so the RTNL lock
+                * is released (the previous code returned here while still
+                * holding rtnl_lock()). */
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_vport_multicast_group.id,
+                               PTR_ERR(reply));
+               err = 0;
+               goto exit_unlock;
+       }
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+/* Genetlink OVS_VPORT_CMD_DEL handler: detaches a vport (the local port
+ * cannot be deleted this way) and multicasts a deletion notification.
+ * Runs under RTNL.
+ */
+static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rtnl_lock();
+       vport = lookup_vport(info->userhdr, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       /* The local port exists for the datapath's lifetime. */
+       if (vport->port_no == OVSP_LOCAL) {
+               err = -EINVAL;
+               goto exit_unlock;
+       }
+
+       /* Build the notification before detaching, while the vport's
+        * attributes are still valid. */
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       ovs_dp_detach_port(vport);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+/* Genetlink OVS_VPORT_CMD_GET handler: replies to the caller with a dump
+ * of one vport's attributes.  Read-only, so it runs under the RCU read
+ * lock rather than RTNL.
+ */
+static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rcu_read_lock();
+       vport = lookup_vport(ovs_header, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       /* 'reply' is self-contained, so the RCU section can end before
+        * the (possibly sleeping) unicast send. */
+       rcu_read_unlock();
+
+       return genlmsg_reply(reply, info);
+
+exit_unlock:
+       rcu_read_unlock();
+       return err;
+}
+
+/* Genetlink dumpit callback for vports: emits one message per port of the
+ * datapath, resuming from cb->args[0] (the next port number) across
+ * successive dump invocations.
+ */
+static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+       struct datapath *dp;
+       u32 port_no;
+       int retval;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       rcu_read_lock();
+       for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+               struct vport *vport;
+
+               vport = rcu_dereference(dp->ports[port_no]);
+               if (!vport)
+                       continue;
+
+               /* Stop when the skb fills up; port_no records where to
+                * resume on the next call. */
+               if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
+                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                           OVS_VPORT_CMD_NEW) < 0)
+                       break;
+       }
+       rcu_read_unlock();
+
+       cb->args[0] = port_no;
+       retval = skb->len;
+
+       return retval;
+}
+
+/* Periodic work item: rebuilds every datapath's flow table with a fresh
+ * hash seed to defeat hash-collision attacks.  The old table is freed
+ * after an RCU grace period; readers see either the old or new table.
+ * Reschedules itself every REHASH_FLOW_INTERVAL.
+ */
+static void rehash_flow_table(struct work_struct *work)
+{
+       struct datapath *dp;
+
+       /* genl_lock protects the dps list and dp->table updates. */
+       genl_lock();
+
+       list_for_each_entry(dp, &dps, list_node) {
+               struct flow_table *old_table = genl_dereference(dp->table);
+               struct flow_table *new_table;
+
+               new_table = ovs_flow_tbl_rehash(old_table);
+               if (!IS_ERR(new_table)) {
+                       rcu_assign_pointer(dp->table, new_table);
+                       ovs_flow_tbl_deferred_destroy(old_table);
+               }
+       }
+
+       genl_unlock();
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+/* Generic netlink operations for the vport family.  Mutating commands
+ * require CAP_NET_ADMIN; GET is open to unprivileged users and also
+ * provides the dump callback. */
+static struct genl_ops dp_vport_genl_ops[] = {
+       { .cmd = OVS_VPORT_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_new
+       },
+       { .cmd = OVS_VPORT_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_del
+       },
+       { .cmd = OVS_VPORT_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_get,
+         .dumpit = ovs_vport_cmd_dump
+       },
+       { .cmd = OVS_VPORT_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_set,
+       },
+};
+
+/* Bundles a genetlink family with its ops array and optional multicast
+ * group, so registration/unregistration can be table-driven. */
+struct genl_family_and_ops {
+       struct genl_family *family;
+       struct genl_ops *ops;
+       int n_ops;
+       struct genl_multicast_group *group; /* may be NULL */
+};
+
+/* All genetlink families exported by the datapath module.  The packet
+ * family has no multicast group. */
+static const struct genl_family_and_ops dp_genl_families[] = {
+       { &dp_datapath_genl_family,
+         dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
+         &ovs_dp_datapath_multicast_group },
+       { &dp_vport_genl_family,
+         dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
+         &ovs_dp_vport_multicast_group },
+       { &dp_flow_genl_family,
+         dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
+         &ovs_dp_flow_multicast_group },
+       { &dp_packet_genl_family,
+         dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
+         NULL },
+};
+
+/* Unregisters the first 'n_families' entries of dp_genl_families; used
+ * both for full teardown and to unwind a partially failed registration. */
+static void dp_unregister_genl(int n_families)
+{
+       int i;
+
+       for (i = 0; i < n_families; i++)
+               genl_unregister_family(dp_genl_families[i].family);
+}
+
+/* Registers every genetlink family (and its multicast group, if any).
+ * On failure, unwinds the families registered so far and returns the
+ * error code.  Returns 0 on success.
+ */
+static int dp_register_genl(void)
+{
+       int n_registered;
+       int err;
+       int i;
+
+       n_registered = 0;
+       for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
+               const struct genl_family_and_ops *f = &dp_genl_families[i];
+
+               err = genl_register_family_with_ops(f->family, f->ops,
+                                                   f->n_ops);
+               if (err)
+                       goto error;
+               /* Count before the group step: unregistering the family
+                * also drops its multicast group. */
+               n_registered++;
+
+               if (f->group) {
+                       err = genl_register_mc_group(f->family, f->group);
+                       if (err)
+                               goto error;
+               }
+       }
+
+       return 0;
+
+error:
+       dp_unregister_genl(n_registered);
+       return err;
+}
+
+/* Module init: brings up the flow and vport subsystems, the netdev
+ * notifier and the genetlink families, then kicks off the periodic flow
+ * table rehash.  Each failure path unwinds everything initialized before
+ * it (goto-ladder cleanup).
+ */
+static int __init dp_init(void)
+{
+       struct sk_buff *dummy_skb;
+       int err;
+
+       /* Compile-time guarantee that our per-packet state fits in the
+        * skb control buffer. */
+       BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+
+       pr_info("Open vSwitch switching datapath\n");
+
+       err = ovs_flow_init();
+       if (err)
+               goto error;
+
+       err = ovs_vport_init();
+       if (err)
+               goto error_flow_exit;
+
+       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       if (err)
+               goto error_vport_exit;
+
+       err = dp_register_genl();
+       if (err < 0)
+               goto error_unreg_notifier;
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+
+       return 0;
+
+error_unreg_notifier:
+       unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_vport_exit:
+       ovs_vport_exit();
+error_flow_exit:
+       ovs_flow_exit();
+error:
+       return err;
+}
+
+/* Module exit: tears down in reverse order of dp_init().  rcu_barrier()
+ * waits for in-flight RCU callbacks (deferred flow/table frees) before
+ * the caches behind them are destroyed. */
+static void dp_cleanup(void)
+{
+       cancel_delayed_work_sync(&rehash_flow_wq);
+       rcu_barrier();
+       dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+       unregister_netdevice_notifier(&ovs_dp_device_notifier);
+       ovs_vport_exit();
+       ovs_flow_exit();
+}
+
+module_init(dp_init);
+module_exit(dp_cleanup);
+
+MODULE_DESCRIPTION("Open vSwitch switching datapath");
+MODULE_LICENSE("GPL");
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
new file mode 100644 (file)
index 0000000..5b9f884
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef DATAPATH_H
+#define DATAPATH_H 1
+
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/version.h>
+
+#include "flow.h"
+
+struct vport;
+
+#define DP_MAX_PORTS 1024
+#define SAMPLE_ACTION_DEPTH 3
+
+/**
+ * struct dp_stats_percpu - per-cpu packet processing statistics for a given
+ * datapath.
+ * @n_hit: Number of received packets for which a matching flow was found in
+ * the flow table.
+ * @n_missed: Number of received packets that had no matching flow in the flow
+ * table.  The sum of @n_hit and @n_missed is the number of packets that have
+ * been received by the datapath.
+ * @n_lost: Number of received packets that had no matching flow in the flow
+ * table that could not be sent to userspace (normally due to an overflow in
+ * one of the datapath's queues).
+ * @sync: Sequence counter for 64-bit-safe reads of the above on 32-bit CPUs.
+ */
+struct dp_stats_percpu {
+       u64 n_hit;
+       u64 n_missed;
+       u64 n_lost;
+       struct u64_stats_sync sync;
+};
+
+/**
+ * struct datapath - datapath for flow-based packet switching
+ * @rcu: RCU callback head for deferred destruction.
+ * @list_node: Element in global 'dps' list.
+ * @table: Current flow table.  Protected by genl_lock and RCU.
+ * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
+ * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
+ * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
+ * to iterate or modify.
+ * @stats_percpu: Per-CPU datapath statistics.
+ *
+ * Context: See the comment on locking at the top of datapath.c for additional
+ * locking information.
+ */
+struct datapath {
+       struct rcu_head rcu;
+       struct list_head list_node;
+
+       /* Flow table. */
+       struct flow_table __rcu *table;
+
+       /* Switch ports. */
+       struct vport __rcu *ports[DP_MAX_PORTS];
+       struct list_head port_list;
+
+       /* Stats. */
+       struct dp_stats_percpu __percpu *stats_percpu;
+};
+
+/**
+ * struct ovs_skb_cb - OVS data in skb CB
+ * @flow: The flow associated with this packet.  May be %NULL if no flow.
+ */
+struct ovs_skb_cb {
+       struct sw_flow          *flow;
+};
+/* Accessor for the OVS private area inside an skb's control buffer. */
+#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
+
+/**
+ * struct dp_upcall_info - metadata to include with a packet to send to
+ * userspace
+ * @cmd: One of %OVS_PACKET_CMD_*.
+ * @key: Becomes %OVS_PACKET_ATTR_KEY.  Must be nonnull.
+ * @userdata: If nonnull, its u64 value is extracted and passed to userspace as
+ * %OVS_PACKET_ATTR_USERDATA.
+ * @pid: Netlink PID to which packet should be sent.  If @pid is 0 then no
+ * packet is sent and the packet is accounted in the datapath's @n_lost
+ * counter.
+ */
+struct dp_upcall_info {
+       u8 cmd;
+       const struct sw_flow_key *key;
+       const struct nlattr *userdata;
+       u32 pid;
+};
+
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+                 const struct dp_upcall_info *);
+
+const char *ovs_dp_name(const struct datapath *dp);
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
+                                        u8 cmd);
+
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+#endif /* datapath.h */
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
new file mode 100644 (file)
index 0000000..4673651
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/netdevice.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* Netdevice notifier callback: when a net device that backs one of our
+ * vports is unregistered out from under us, detach the vport and notify
+ * userspace listeners.  Internal devices are skipped here because their
+ * teardown is driven by OVS itself, not by external unregistration.
+ */
+static int dp_device_event(struct notifier_block *unused, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct vport *vport;
+
+       if (ovs_is_internal_dev(dev))
+               vport = ovs_internal_dev_get_vport(dev);
+       else
+               vport = ovs_netdev_get_vport(dev);
+
+       /* Not one of ours. */
+       if (!vport)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               if (!ovs_is_internal_dev(dev)) {
+                       struct sk_buff *notify;
+
+                       /* Build the notification before detaching, while
+                        * the vport attributes are still valid. */
+                       notify = ovs_vport_cmd_build_info(vport, 0, 0,
+                                                         OVS_VPORT_CMD_DEL);
+                       ovs_dp_detach_port(vport);
+                       if (IS_ERR(notify)) {
+                               netlink_set_err(init_net.genl_sock, 0,
+                                               ovs_dp_vport_multicast_group.id,
+                                               PTR_ERR(notify));
+                               break;
+                       }
+
+                       genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
+                                         GFP_KERNEL);
+               }
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+/* Registered with register_netdevice_notifier() in dp_init(). */
+struct notifier_block ovs_dp_device_notifier = {
+       .notifier_call = dp_device_event
+};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
new file mode 100644 (file)
index 0000000..fe7f020
--- /dev/null
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+static struct kmem_cache *flow_cache;
+
+/* Ensures at least 'len' bytes of the packet exist and are linear
+ * (pullable) in the skb head.  Returns 0, -EINVAL if the packet is too
+ * short, or -ENOMEM if linearization fails. */
+static int check_header(struct sk_buff *skb, int len)
+{
+       if (unlikely(skb->len < len))
+               return -EINVAL;
+       if (unlikely(!pskb_may_pull(skb, len)))
+               return -ENOMEM;
+       return 0;
+}
+
+/* True if a complete Ethernet ARP header is linear in the skb. */
+static bool arphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_network_offset(skb) +
+                                 sizeof(struct arp_eth_header));
+}
+
+/* Validates the IPv4 header (presence and declared IHL) and positions the
+ * skb transport header just past it.  Returns 0 or a negative errno. */
+static int check_iphdr(struct sk_buff *skb)
+{
+       unsigned int nh_ofs = skb_network_offset(skb);
+       unsigned int ip_len;
+       int err;
+
+       err = check_header(skb, nh_ofs + sizeof(struct iphdr));
+       if (unlikely(err))
+               return err;
+
+       /* IHL must be at least the minimum header and fit in the packet. */
+       ip_len = ip_hdrlen(skb);
+       if (unlikely(ip_len < sizeof(struct iphdr) ||
+                    skb->len < nh_ofs + ip_len))
+               return -EINVAL;
+
+       skb_set_transport_header(skb, nh_ofs + ip_len);
+       return 0;
+}
+
+/* True if a complete TCP header (including its declared data offset) is
+ * present in the packet. */
+static bool tcphdr_ok(struct sk_buff *skb)
+{
+       int th_ofs = skb_transport_offset(skb);
+       int tcp_len;
+
+       if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
+               return false;
+
+       /* doff must cover at least the fixed header and fit the packet. */
+       tcp_len = tcp_hdrlen(skb);
+       if (unlikely(tcp_len < sizeof(struct tcphdr) ||
+                    skb->len < th_ofs + tcp_len))
+               return false;
+
+       return true;
+}
+
+/* True if a complete UDP header is linear in the skb. */
+static bool udphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct udphdr));
+}
+
+/* True if a complete ICMP header is linear in the skb. */
+static bool icmphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct icmphdr));
+}
+
+/* Converts a flow's last-used timestamp (in jiffies) to milliseconds on
+ * the monotonic clock, as reported to userspace: current monotonic time
+ * minus how long ago the flow was last used. */
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
+{
+       struct timespec cur_ts;
+       u64 cur_ms, idle_ms;
+
+       ktime_get_ts(&cur_ts);
+       idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+       cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+                cur_ts.tv_nsec / NSEC_PER_MSEC;
+
+       return cur_ms - idle_ms;
+}
+
+/* Number of key bytes that are meaningful once 'field' has been filled
+ * in, i.e. the offset just past the end of 'field' in sw_flow_key. */
+#define SW_FLOW_KEY_OFFSET(field)              \
+       (offsetof(struct sw_flow_key, field) +  \
+        FIELD_SIZEOF(struct sw_flow_key, field))
+
+/* Extracts the IPv6 portion of the flow key (addresses, flow label,
+ * traffic class, hop limit, final next-header protocol and fragment
+ * state), skipping extension headers.  Sets *key_lenp to the number of
+ * valid key bytes.  Returns the total network-header length (fixed header
+ * plus extension headers) on success or a negative errno. */
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
+                        int *key_lenp)
+{
+       unsigned int nh_ofs = skb_network_offset(skb);
+       unsigned int nh_len;
+       int payload_ofs;
+       struct ipv6hdr *nh;
+       uint8_t nexthdr;
+       __be16 frag_off;
+       int err;
+
+       *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
+
+       err = check_header(skb, nh_ofs + sizeof(*nh));
+       if (unlikely(err))
+               return err;
+
+       nh = ipv6_hdr(skb);
+       nexthdr = nh->nexthdr;
+       payload_ofs = (u8 *)(nh + 1) - skb->data;
+
+       key->ip.proto = NEXTHDR_NONE;
+       key->ip.tos = ipv6_get_dsfield(nh);
+       key->ip.ttl = nh->hop_limit;
+       key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+       key->ipv6.addr.src = nh->saddr;
+       key->ipv6.addr.dst = nh->daddr;
+
+       /* Walk past any extension headers to the upper-layer protocol. */
+       payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
+       if (unlikely(payload_ofs < 0))
+               return -EINVAL;
+
+       if (frag_off) {
+               /* Nonzero fragment offset => not the first fragment. */
+               if (frag_off & htons(~0x7))
+                       key->ip.frag = OVS_FRAG_TYPE_LATER;
+               else
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+       }
+
+       nh_len = payload_ofs - nh_ofs;
+       skb_set_transport_header(skb, nh_ofs + nh_len);
+       key->ip.proto = nexthdr;
+       return nh_len;
+}
+
+/* True if a complete ICMPv6 header is linear in the skb. */
+static bool icmp6hdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct icmp6hdr));
+}
+
+/* Byte offset of the flags octet within a TCP header, and the mask of the
+ * six classic flag bits (FIN..URG) kept in flow stats. */
+#define TCP_FLAGS_OFFSET 13
+#define TCP_FLAG_MASK 0x3f
+
+/* Updates a flow's usage statistics (timestamp, packet/byte counters and
+ * accumulated TCP flags) for one received packet.  Serialized by the
+ * per-flow spinlock. */
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+{
+       u8 tcp_flags = 0;
+
+       if (flow->key.eth.type == htons(ETH_P_IP) &&
+           flow->key.ip.proto == IPPROTO_TCP) {
+               u8 *tcp = (u8 *)tcp_hdr(skb);
+               tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
+       }
+
+       spin_lock(&flow->lock);
+       flow->used = jiffies;
+       flow->packet_count++;
+       flow->byte_count += skb->len;
+       flow->tcp_flags |= tcp_flags;
+       spin_unlock(&flow->lock);
+}
+
+/* Allocates a sw_flow_actions structure holding a copy of the netlink
+ * 'actions' payload.  Returns the new structure or an ERR_PTR(). */
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+{
+       int actions_len = nla_len(actions);
+       struct sw_flow_actions *sfa;
+
+       /* At least DP_MAX_PORTS actions are required to be able to flood a
+        * packet to every port.  Factor of 2 allows for setting VLAN tags,
+        * etc. */
+       if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+               return ERR_PTR(-EINVAL);
+
+       sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+       if (!sfa)
+               return ERR_PTR(-ENOMEM);
+
+       sfa->actions_len = actions_len;
+       memcpy(sfa->actions, nla_data(actions), actions_len);
+       return sfa;
+}
+
+/* Allocates a sw_flow from the flow slab with its lock initialized and no
+ * actions attached.  Returns the flow or ERR_PTR(-ENOMEM). */
+struct sw_flow *ovs_flow_alloc(void)
+{
+       struct sw_flow *flow;
+
+       flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+       if (!flow)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&flow->lock);
+       flow->sf_acts = NULL;
+
+       return flow;
+}
+
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+{
+       hash = jhash_1word(hash, table->hash_seed);
+       return flex_array_get(table->buckets,
+                               (hash & (table->n_buckets - 1)));
+}
+
+/* Allocates and initializes 'n_buckets' empty hlist heads in a flex_array
+ * (page-sized chunks, so large tables avoid high-order allocations).
+ * Returns NULL on allocation failure. */
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+       struct flex_array *buckets;
+       int i, err;
+
+       buckets = flex_array_alloc(sizeof(struct hlist_head *),
+                                  n_buckets, GFP_KERNEL);
+       if (!buckets)
+               return NULL;
+
+       /* Fault in all parts up front so later accesses cannot fail. */
+       err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+       if (err) {
+               flex_array_free(buckets);
+               return NULL;
+       }
+
+       for (i = 0; i < n_buckets; i++)
+               INIT_HLIST_HEAD((struct hlist_head *)
+                                       flex_array_get(buckets, i));
+
+       return buckets;
+}
+
+/* Releases a bucket array allocated by alloc_buckets(). */
+static void free_buckets(struct flex_array *buckets)
+{
+       flex_array_free(buckets);
+}
+
+/* Allocates an empty flow table with 'new_size' buckets and a fresh
+ * random hash seed.  Returns NULL on allocation failure. */
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+       struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+       if (!table)
+               return NULL;
+
+       table->buckets = alloc_buckets(new_size);
+
+       if (!table->buckets) {
+               kfree(table);
+               return NULL;
+       }
+       table->n_buckets = new_size;
+       table->count = 0;
+       table->node_ver = 0;
+       table->keep_flows = false;
+       /* Random seed defends bucket distribution against crafted keys. */
+       get_random_bytes(&table->hash_seed, sizeof(u32));
+
+       return table;
+}
+
+/* Frees a flow table.  If keep_flows is set (the flows were handed off to
+ * a successor table during rehash/expand), only the bucket array and the
+ * table itself are freed; otherwise every flow is unlinked and freed too. */
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+       int i;
+
+       if (!table)
+               return;
+
+       if (table->keep_flows)
+               goto skip_flows;
+
+       for (i = 0; i < table->n_buckets; i++) {
+               struct sw_flow *flow;
+               struct hlist_head *head = flex_array_get(table->buckets, i);
+               struct hlist_node *node, *n;
+               int ver = table->node_ver;
+
+               hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+                       hlist_del_rcu(&flow->hash_node[ver]);
+                       ovs_flow_free(flow);
+               }
+       }
+
+skip_flows:
+       free_buckets(table->buckets);
+       kfree(table);
+}
+
+/* RCU callback: destroys a flow table once no readers can reference it. */
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+       struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+       ovs_flow_tbl_destroy(table);
+}
+
+/* Schedules 'table' for destruction after the next RCU grace period, so
+ * in-flight lockless lookups finish first.  NULL is a no-op. */
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+{
+       if (!table)
+               return;
+
+       call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+}
+
+/* Iterator over all flows in the table for dump operations: resumes at
+ * position (*bucket, *last), returns the next flow and advances the
+ * cursor, or returns NULL when the table is exhausted.  Caller holds the
+ * RCU read lock. */
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+{
+       struct sw_flow *flow;
+       struct hlist_head *head;
+       struct hlist_node *n;
+       int ver;
+       int i;
+
+       ver = table->node_ver;
+       while (*bucket < table->n_buckets) {
+               i = 0;
+               head = flex_array_get(table->buckets, *bucket);
+               hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+                       /* Skip entries already returned in this bucket. */
+                       if (i < *last) {
+                               i++;
+                               continue;
+                       }
+                       *last = i + 1;
+                       return flow;
+               }
+               (*bucket)++;
+               *last = 0;
+       }
+
+       return NULL;
+}
+
+/* Links every flow of 'old' into 'new' using the alternate hash_node slot
+ * (node_ver flips between tables), so flows live in both tables at once
+ * without reallocation.  Sets old->keep_flows so destroying 'old' later
+ * does not free the flows now owned by 'new'. */
+static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
+{
+       int old_ver;
+       int i;
+
+       old_ver = old->node_ver;
+       new->node_ver = !old_ver;
+
+       /* Insert in new table. */
+       for (i = 0; i < old->n_buckets; i++) {
+               struct sw_flow *flow;
+               struct hlist_head *head;
+               struct hlist_node *n;
+
+               head = flex_array_get(old->buckets, i);
+
+               hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+                       ovs_flow_tbl_insert(new, flow);
+       }
+       old->keep_flows = true;
+}
+
+/* Builds a new table of 'n_buckets' buckets containing all of 'table's
+ * flows (re-hashed under the new table's seed).  Returns the new table or
+ * ERR_PTR(-ENOMEM); the old table is untouched except for keep_flows. */
+static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
+{
+       struct flow_table *new_table;
+
+       new_table = ovs_flow_tbl_alloc(n_buckets);
+       if (!new_table)
+               return ERR_PTR(-ENOMEM);
+
+       flow_table_copy_flows(table, new_table);
+
+       return new_table;
+}
+
+/* Rehash at the same size: only the hash seed changes (DoS hardening). */
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+{
+       return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+/* Rehash into a table with twice as many buckets. */
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
+{
+       return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
+/* Immediately frees 'flow' and its actions back to the flow slab.  NULL
+ * is a no-op.  Callers must guarantee no concurrent RCU readers; use
+ * ovs_flow_deferred_free() otherwise. */
+void ovs_flow_free(struct sw_flow *flow)
+{
+       if (unlikely(!flow))
+               return;
+
+       /* The cast exists only to strip the __rcu annotation for kfree();
+        * the previous code cast to a nonexistent "struct sf_flow_acts",
+        * which compiled only because kfree() takes void *.  Use the real
+        * type name. */
+       kfree((struct sw_flow_actions __force *)flow->sf_acts);
+       kmem_cache_free(flow_cache, flow);
+}
+
+/* RCU callback used by ovs_flow_deferred_free: frees the flow once no
+ * reader can still hold a reference. */
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+       struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+       ovs_flow_free(flow);
+}
+
+/* Schedules 'flow' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free(struct sw_flow *flow)
+{
+       call_rcu(&flow->rcu, rcu_free_flow_callback);
+}
+
+/* RCU callback used by ovs_flow_deferred_free_acts: frees a retired
+ * actions buffer once no reader can still reference it. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+       struct sw_flow_actions *sf_acts = container_of(rcu,
+                       struct sw_flow_actions, rcu);
+       kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+{
+       call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
+
+/* Parses an 802.1Q tag at the front of the (already Ethernet-stripped)
+ * packet: records TCI (with VLAN_TAG_PRESENT set) in the key and pulls
+ * the tag off so the inner ethertype is next.  A packet too short to hold
+ * the tag plus an inner ethertype is left untouched (returns 0).
+ * Returns 0 or -ENOMEM if the header cannot be linearized. */
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+       struct qtag_prefix {
+               __be16 eth_type; /* ETH_P_8021Q */
+               __be16 tci;
+       };
+       struct qtag_prefix *qp;
+
+       if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+               return 0;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
+                                        sizeof(__be16))))
+               return -ENOMEM;
+
+       qp = (struct qtag_prefix *) skb->data;
+       key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
+       __skb_pull(skb, sizeof(struct qtag_prefix));
+
+       return 0;
+}
+
+/* Determines the packet's ethertype, consuming the length/type field (and
+ * any LLC/SNAP header) from the front of the skb.  Ethernet II frames
+ * return their type directly; 802.3 frames with a SNAP header yield the
+ * encapsulated ethertype; anything else is classed as ETH_P_802_2.
+ * Returns htons(0) if a SNAP header cannot be linearized. */
+static __be16 parse_ethertype(struct sk_buff *skb)
+{
+       struct llc_snap_hdr {
+               u8  dsap;  /* Always 0xAA */
+               u8  ssap;  /* Always 0xAA */
+               u8  ctrl;
+               u8  oui[3];
+               __be16 ethertype;
+       };
+       struct llc_snap_hdr *llc;
+       __be16 proto;
+
+       proto = *(__be16 *) skb->data;
+       __skb_pull(skb, sizeof(__be16));
+
+       /* Values >= 1536 (0x600) are ethertypes; below that it is an
+        * 802.3 length field. */
+       if (ntohs(proto) >= 1536)
+               return proto;
+
+       if (skb->len < sizeof(struct llc_snap_hdr))
+               return htons(ETH_P_802_2);
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
+               return htons(0);
+
+       llc = (struct llc_snap_hdr *) skb->data;
+       if (llc->dsap != LLC_SAP_SNAP ||
+           llc->ssap != LLC_SAP_SNAP ||
+           (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
+               return htons(ETH_P_802_2);
+
+       __skb_pull(skb, sizeof(struct llc_snap_hdr));
+       return llc->ethertype;
+}
+
+/* Fills in the ICMPv6 portion of @key from the ICMPv6 header of @skb, whose
+ * transport header must already be set.  The type and code are stored in the
+ * 16-bit transport-port slots of the flow key, in network byte order.  For
+ * neighbour solicitation/advertisement messages (code 0) the ND target
+ * address and any source/target link-layer address options are extracted as
+ * well.  A malformed or duplicated ND option list zeroes the ND key fields
+ * again but is NOT treated as an error; the return value is 0 in that case.
+ * Returns -ENOMEM only if the packet could not be linearized.  *key_lenp
+ * receives the number of bytes of @key that are now meaningful.
+ * @nh_len is currently unused.
+ */
+static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+                       int *key_lenp, int nh_len)
+{
+       struct icmp6hdr *icmp = icmp6_hdr(skb);
+       int error = 0;
+       int key_len;
+
+       /* The ICMPv6 type and code fields use the 16-bit transport port
+        * fields, so we need to store them in 16-bit network byte order.
+        */
+       key->ipv6.tp.src = htons(icmp->icmp6_type);
+       key->ipv6.tp.dst = htons(icmp->icmp6_code);
+       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+
+       if (icmp->icmp6_code == 0 &&
+           (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+            icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+               int icmp_len = skb->len - skb_transport_offset(skb);
+               struct nd_msg *nd;
+               int offset;
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+               /* In order to process neighbor discovery options, we need the
+                * entire packet.
+                */
+               if (unlikely(icmp_len < sizeof(*nd)))
+                       goto out;
+               if (unlikely(skb_linearize(skb))) {
+                       error = -ENOMEM;
+                       goto out;
+               }
+
+               nd = (struct nd_msg *)skb_transport_header(skb);
+               key->ipv6.nd.target = nd->target;
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+               icmp_len -= sizeof(*nd);
+               offset = 0;
+               /* ND options are laid out in units of 8 octets
+                * (nd_opt_len counts 8-byte chunks).
+                */
+               while (icmp_len >= 8) {
+                       struct nd_opt_hdr *nd_opt =
+                                (struct nd_opt_hdr *)(nd->opt + offset);
+                       int opt_len = nd_opt->nd_opt_len * 8;
+
+                       if (unlikely(!opt_len || opt_len > icmp_len))
+                               goto invalid;
+
+                       /* Store the link layer address if the appropriate
+                        * option is provided.  It is considered an error if
+                        * the same link layer option is specified twice.
+                        */
+                       if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
+                           && opt_len == 8) {
+                               if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
+                                       goto invalid;
+                               memcpy(key->ipv6.nd.sll,
+                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                       } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
+                                  && opt_len == 8) {
+                               if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
+                                       goto invalid;
+                               memcpy(key->ipv6.nd.tll,
+                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                       }
+
+                       icmp_len -= opt_len;
+                       offset += opt_len;
+               }
+       }
+
+       goto out;
+
+invalid:
+       /* Discard any partially-parsed ND state so the flow key matches as
+        * if no ND information were present; the packet is still accepted.
+        */
+       memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
+       memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
+       memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
+
+out:
+       *key_lenp = key_len;
+       return error;
+}
+
+/**
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @in_port: port number on which @skb was received.
+ * @key: output flow key
+ * @key_lenp: length of output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header pointers as follows:
+ *
+ *    - skb->mac_header: the Ethernet header.
+ *
+ *    - skb->network_header: just past the Ethernet header, or just past the
+ *      VLAN header, to the first byte of the Ethernet payload.
+ *
+ *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->dl_type values it is left untouched.
+ */
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+                int *key_lenp)
+{
+       int error = 0;
+       int key_len = SW_FLOW_KEY_OFFSET(eth);
+       struct ethhdr *eth;
+
+       memset(key, 0, sizeof(*key));
+
+       key->phy.priority = skb->priority;
+       key->phy.in_port = in_port;
+
+       skb_reset_mac_header(skb);
+
+       /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
+        * header in the linear data area.
+        */
+       eth = eth_hdr(skb);
+       memcpy(key->eth.src, eth->h_source, ETH_ALEN);
+       memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+
+       /* Skip the dst/src MAC addresses so that skb->data points at the
+        * 802.1Q tag or the type/length field; the __skb_push() below
+        * restores skb->data to the mac header.
+        */
+       __skb_pull(skb, 2 * ETH_ALEN);
+
+       if (vlan_tx_tag_present(skb))
+               key->eth.tci = htons(skb->vlan_tci);
+       else if (eth->h_proto == htons(ETH_P_8021Q))
+               if (unlikely(parse_vlan(skb, key)))
+                       return -ENOMEM;
+
+       /* parse_ethertype() signals pskb_may_pull() failure as htons(0). */
+       key->eth.type = parse_ethertype(skb);
+       if (unlikely(key->eth.type == htons(0)))
+               return -ENOMEM;
+
+       skb_reset_network_header(skb);
+       __skb_push(skb, skb->data - skb_mac_header(skb));
+
+       /* Network layer. */
+       if (key->eth.type == htons(ETH_P_IP)) {
+               struct iphdr *nh;
+               __be16 offset;
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+
+               error = check_iphdr(skb);
+               if (unlikely(error)) {
+                       if (error == -EINVAL) {
+                               skb->transport_header = skb->network_header;
+                               error = 0;
+                       }
+                       goto out;
+               }
+
+               nh = ip_hdr(skb);
+               key->ipv4.addr.src = nh->saddr;
+               key->ipv4.addr.dst = nh->daddr;
+
+               key->ip.proto = nh->protocol;
+               key->ip.tos = nh->tos;
+               key->ip.ttl = nh->ttl;
+
+               /* A non-zero fragment offset means this is not the first
+                * fragment, so no transport header is present.
+                */
+               offset = nh->frag_off & htons(IP_OFFSET);
+               if (offset) {
+                       key->ip.frag = OVS_FRAG_TYPE_LATER;
+                       goto out;
+               }
+               if (nh->frag_off & htons(IP_MF) ||
+                        skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+               /* Transport layer. */
+               if (key->ip.proto == IPPROTO_TCP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (tcphdr_ok(skb)) {
+                               struct tcphdr *tcp = tcp_hdr(skb);
+                               key->ipv4.tp.src = tcp->source;
+                               key->ipv4.tp.dst = tcp->dest;
+                       }
+               } else if (key->ip.proto == IPPROTO_UDP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (udphdr_ok(skb)) {
+                               struct udphdr *udp = udp_hdr(skb);
+                               key->ipv4.tp.src = udp->source;
+                               key->ipv4.tp.dst = udp->dest;
+                       }
+               } else if (key->ip.proto == IPPROTO_ICMP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (icmphdr_ok(skb)) {
+                               struct icmphdr *icmp = icmp_hdr(skb);
+                               /* The ICMP type and code fields use the 16-bit
+                                * transport port fields, so we need to store
+                                * them in 16-bit network byte order. */
+                               key->ipv4.tp.src = htons(icmp->type);
+                               key->ipv4.tp.dst = htons(icmp->code);
+                       }
+               }
+
+       } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
+               struct arp_eth_header *arp;
+
+               arp = (struct arp_eth_header *)skb_network_header(skb);
+
+               /* Only Ethernet/IPv4 ARP is matched on. */
+               if (arp->ar_hrd == htons(ARPHRD_ETHER)
+                               && arp->ar_pro == htons(ETH_P_IP)
+                               && arp->ar_hln == ETH_ALEN
+                               && arp->ar_pln == 4) {
+
+                       /* We only match on the lower 8 bits of the opcode. */
+                       if (ntohs(arp->ar_op) <= 0xff)
+                               key->ip.proto = ntohs(arp->ar_op);
+
+                       if (key->ip.proto == ARPOP_REQUEST
+                                       || key->ip.proto == ARPOP_REPLY) {
+                               memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+                               memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+                               memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+                               memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+                               key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+                       }
+               }
+       } else if (key->eth.type == htons(ETH_P_IPV6)) {
+               int nh_len;             /* IPv6 Header + Extensions */
+
+               nh_len = parse_ipv6hdr(skb, key, &key_len);
+               if (unlikely(nh_len < 0)) {
+                       if (nh_len == -EINVAL)
+                               skb->transport_header = skb->network_header;
+                       else
+                               error = nh_len;
+                       goto out;
+               }
+
+               if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+                       goto out;
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+               /* Transport layer. */
+               if (key->ip.proto == NEXTHDR_TCP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (tcphdr_ok(skb)) {
+                               struct tcphdr *tcp = tcp_hdr(skb);
+                               key->ipv6.tp.src = tcp->source;
+                               key->ipv6.tp.dst = tcp->dest;
+                       }
+               } else if (key->ip.proto == NEXTHDR_UDP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (udphdr_ok(skb)) {
+                               struct udphdr *udp = udp_hdr(skb);
+                               key->ipv6.tp.src = udp->source;
+                               key->ipv6.tp.dst = udp->dest;
+                       }
+               } else if (key->ip.proto == NEXTHDR_ICMP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (icmp6hdr_ok(skb)) {
+                               error = parse_icmpv6(skb, key, &key_len, nh_len);
+                               if (error < 0)
+                                       goto out;
+                       }
+               }
+       }
+
+out:
+       *key_lenp = key_len;
+       return error;
+}
+
+/* Hashes the first @key_len bytes of @key (rounded up to a whole number of
+ * u32 words) with jhash2.  Used to pick a flow-table bucket.
+ */
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+{
+       return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+}
+
+/* Looks up the flow whose first @key_len bytes exactly match @key.  Walks
+ * the single hash bucket selected by ovs_flow_hash() under RCU; returns the
+ * matching flow or NULL.  Caller must hold rcu_read_lock (RCU iteration).
+ */
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+                               struct sw_flow_key *key, int key_len)
+{
+       struct sw_flow *flow;
+       struct hlist_node *n;
+       struct hlist_head *head;
+       u32 hash;
+
+       hash = ovs_flow_hash(key, key_len);
+
+       head = find_bucket(table, hash);
+       hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+
+               /* Compare the hash first to skip most non-matches cheaply. */
+               if (flow->hash == hash &&
+                   !memcmp(&flow->key, key, key_len)) {
+                       return flow;
+               }
+       }
+       return NULL;
+}
+
+/* Inserts @flow into @table's bucket for flow->hash (which must already be
+ * computed) and bumps the table's flow count.
+ */
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+       struct hlist_head *head;
+
+       head = find_bucket(table, flow->hash);
+       hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+       table->count++;
+}
+
+/* Unlinks @flow from @table (RCU-safe removal) and decrements the flow
+ * count; a negative count indicates a bookkeeping bug, hence the BUG_ON.
+ */
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+       hlist_del_rcu(&flow->hash_node[table->node_ver]);
+       table->count--;
+       BUG_ON(table->count < 0);
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.
+ * A value of -1 means "variable length": parse_flow_nlattrs() skips the
+ * length check for such attributes.
+ */
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+       [OVS_KEY_ATTR_ENCAP] = -1,      /* Nested attributes, variable length. */
+       [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+       [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+       [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+       [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+       [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+       [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+       [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+       [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+       [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+       [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+       [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+       [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+       [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+};
+
+/* Parses the transport-layer (TCP/UDP/ICMP) attribute for an IPv4 flow into
+ * @swkey, selected by swkey->ip.proto.  The consumed attribute's bit is
+ * cleared from *attrs and *key_len is updated to cover the transport-port
+ * fields.  Returns 0, or -EINVAL if the attribute required by ip.proto is
+ * missing.  Unrecognized protocols are accepted without consuming anything.
+ */
+static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+                                 const struct nlattr *a[], u32 *attrs)
+{
+       const struct ovs_key_icmp *icmp_key;
+       const struct ovs_key_tcp *tcp_key;
+       const struct ovs_key_udp *udp_key;
+
+       switch (swkey->ip.proto) {
+       case IPPROTO_TCP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+               swkey->ipv4.tp.src = tcp_key->tcp_src;
+               swkey->ipv4.tp.dst = tcp_key->tcp_dst;
+               break;
+
+       case IPPROTO_UDP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+               swkey->ipv4.tp.src = udp_key->udp_src;
+               swkey->ipv4.tp.dst = udp_key->udp_dst;
+               break;
+
+       case IPPROTO_ICMP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+               /* ICMP type/code are stored in the 16-bit transport-port
+                * slots, in network byte order (matches ovs_flow_extract()).
+                */
+               swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
+               swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
+               break;
+       }
+
+       return 0;
+}
+
+/* IPv6 counterpart of ipv4_flow_from_nlattrs(): parses the TCP/UDP/ICMPv6
+ * attribute selected by swkey->ip.proto into @swkey, clearing consumed bits
+ * from *attrs and updating *key_len.  For ICMPv6 neighbour solicitation and
+ * advertisement types the OVS_KEY_ATTR_ND attribute is also required and
+ * parsed.  Returns 0 or -EINVAL if a required attribute is missing.
+ */
+static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+                                 const struct nlattr *a[], u32 *attrs)
+{
+       const struct ovs_key_icmpv6 *icmpv6_key;
+       const struct ovs_key_tcp *tcp_key;
+       const struct ovs_key_udp *udp_key;
+
+       switch (swkey->ip.proto) {
+       case IPPROTO_TCP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+               swkey->ipv6.tp.src = tcp_key->tcp_src;
+               swkey->ipv6.tp.dst = tcp_key->tcp_dst;
+               break;
+
+       case IPPROTO_UDP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+               swkey->ipv6.tp.src = udp_key->udp_src;
+               swkey->ipv6.tp.dst = udp_key->udp_dst;
+               break;
+
+       case IPPROTO_ICMPV6:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+               /* Type/code go into the 16-bit transport-port slots in
+                * network byte order, matching parse_icmpv6().
+                */
+               swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
+               swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+
+               if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+                   swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+                       const struct ovs_key_nd *nd_key;
+
+                       if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
+                               return -EINVAL;
+                       *attrs &= ~(1 << OVS_KEY_ATTR_ND);
+
+                       *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+                       nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+                       memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
+                              sizeof(swkey->ipv6.nd.target));
+                       memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
+                       memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+/* Walks the attributes nested inside @attr, storing a pointer to each in
+ * a[] (indexed by attribute type) and setting the corresponding bit in
+ * *attrsp.  Rejects out-of-range types, duplicates, attributes whose
+ * payload length disagrees with ovs_key_lens[] (-1 entries are exempt),
+ * and trailing bytes.  Returns 0 or -EINVAL.
+ */
+static int parse_flow_nlattrs(const struct nlattr *attr,
+                             const struct nlattr *a[], u32 *attrsp)
+{
+       const struct nlattr *nla;
+       u32 attrs;
+       int rem;
+
+       attrs = 0;
+       nla_for_each_nested(nla, attr, rem) {
+               u16 type = nla_type(nla);
+               int expected_len;
+
+               if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+                       return -EINVAL;
+
+               expected_len = ovs_key_lens[type];
+               if (nla_len(nla) != expected_len && expected_len != -1)
+                       return -EINVAL;
+
+               attrs |= 1 << type;
+               a[type] = nla;
+       }
+       if (rem)
+               return -EINVAL;
+
+       *attrsp = attrs;
+       return 0;
+}
+
+/**
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * @swkey: receives the extracted flow key.
+ * @key_lenp: number of bytes used in @swkey.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * Returns 0 if successful, otherwise -EINVAL.  Any attribute left
+ * unconsumed after parsing is treated as an error.
+ */
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+                     const struct nlattr *attr)
+{
+       const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+       const struct ovs_key_ethernet *eth_key;
+       int key_len;
+       u32 attrs;
+       int err;
+
+       memset(swkey, 0, sizeof(struct sw_flow_key));
+       key_len = SW_FLOW_KEY_OFFSET(eth);
+
+       err = parse_flow_nlattrs(attr, a, &attrs);
+       if (err)
+               return err;
+
+       /* Metadata attributes. */
+       if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+               swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
+               attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+       }
+       if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+               u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+               if (in_port >= DP_MAX_PORTS)
+                       return -EINVAL;
+               swkey->phy.in_port = in_port;
+               attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+       } else {
+               /* USHRT_MAX is the "no input port" sentinel. */
+               swkey->phy.in_port = USHRT_MAX;
+       }
+
+       /* Data attributes. */
+       if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
+               return -EINVAL;
+       attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+
+       eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+       memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
+       memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+
+       if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
+           nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
+               const struct nlattr *encap;
+               __be16 tci;
+
+               /* A VLAN-tagged key must consist of exactly VLAN +
+                * ETHERTYPE + ENCAP at the outer level; the real key
+                * attributes are nested inside ENCAP.
+                */
+               if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
+                             (1 << OVS_KEY_ATTR_ETHERTYPE) |
+                             (1 << OVS_KEY_ATTR_ENCAP)))
+                       return -EINVAL;
+
+               encap = a[OVS_KEY_ATTR_ENCAP];
+               tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+               if (tci & htons(VLAN_TAG_PRESENT)) {
+                       swkey->eth.tci = tci;
+
+                       /* Re-parse a[]/attrs from the encapsulated set. */
+                       err = parse_flow_nlattrs(encap, a, &attrs);
+                       if (err)
+                               return err;
+               } else if (!tci) {
+                       /* Corner case for truncated 802.1Q header. */
+                       if (nla_len(encap))
+                               return -EINVAL;
+
+                       swkey->eth.type = htons(ETH_P_8021Q);
+                       *key_lenp = key_len;
+                       return 0;
+               } else {
+                       return -EINVAL;
+               }
+       }
+
+       if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+               swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+               /* Values below 1536 (0x600) are 802.3 lengths, not valid
+                * EtherTypes.
+                */
+               if (ntohs(swkey->eth.type) < 1536)
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+       } else {
+               swkey->eth.type = htons(ETH_P_802_2);
+       }
+
+       if (swkey->eth.type == htons(ETH_P_IP)) {
+               const struct ovs_key_ipv4 *ipv4_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+               ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+               if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+                       return -EINVAL;
+               swkey->ip.proto = ipv4_key->ipv4_proto;
+               swkey->ip.tos = ipv4_key->ipv4_tos;
+               swkey->ip.ttl = ipv4_key->ipv4_ttl;
+               swkey->ip.frag = ipv4_key->ipv4_frag;
+               swkey->ipv4.addr.src = ipv4_key->ipv4_src;
+               swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
+
+               /* Later fragments carry no transport header to match on. */
+               if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+                       err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+                       if (err)
+                               return err;
+               }
+       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+               const struct ovs_key_ipv6 *ipv6_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
+               ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+               if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+                       return -EINVAL;
+               swkey->ipv6.label = ipv6_key->ipv6_label;
+               swkey->ip.proto = ipv6_key->ipv6_proto;
+               swkey->ip.tos = ipv6_key->ipv6_tclass;
+               swkey->ip.ttl = ipv6_key->ipv6_hlimit;
+               swkey->ip.frag = ipv6_key->ipv6_frag;
+               memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
+                      sizeof(swkey->ipv6.addr.src));
+               memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
+                      sizeof(swkey->ipv6.addr.dst));
+
+               if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+                       err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+                       if (err)
+                               return err;
+               }
+       } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+               const struct ovs_key_arp *arp_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+               arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+               swkey->ipv4.addr.src = arp_key->arp_sip;
+               swkey->ipv4.addr.dst = arp_key->arp_tip;
+               /* Only the low 8 bits of the ARP opcode are matched on. */
+               if (arp_key->arp_op & htons(0xff00))
+                       return -EINVAL;
+               swkey->ip.proto = ntohs(arp_key->arp_op);
+               memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
+               memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+       }
+
+       if (attrs)
+               return -EINVAL;
+       *key_lenp = key_len;
+
+       return 0;
+}
+
+/**
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * @priority: receives the extracted packet priority (0 if absent).
+ * @in_port: receives the extracted input port (USHRT_MAX if absent).
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by flow_from_nlattrs(), but only enough of it to
+ * get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+                              const struct nlattr *attr)
+{
+       const struct nlattr *nla;
+       int rem;
+
+       *in_port = USHRT_MAX;
+       *priority = 0;
+
+       /* Unlike parse_flow_nlattrs(), unknown or duplicate attribute types
+        * are silently skipped; only fixed-length known types are
+        * length-checked.
+        */
+       nla_for_each_nested(nla, attr, rem) {
+               int type = nla_type(nla);
+
+               if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+                       if (nla_len(nla) != ovs_key_lens[type])
+                               return -EINVAL;
+
+                       switch (type) {
+                       case OVS_KEY_ATTR_PRIORITY:
+                               *priority = nla_get_u32(nla);
+                               break;
+
+                       case OVS_KEY_ATTR_IN_PORT:
+                               if (nla_get_u32(nla) >= DP_MAX_PORTS)
+                                       return -EINVAL;
+                               *in_port = nla_get_u32(nla);
+                               break;
+                       }
+               }
+       }
+       if (rem)
+               return -EINVAL;
+       return 0;
+}
+
+/* Serializes @swkey as a sequence of OVS_KEY_ATTR_* Netlink attributes
+ * appended to @skb — the inverse of ovs_flow_from_nlattrs().  Returns 0 on
+ * success or -EMSGSIZE if the skb runs out of room.  The NLA_PUT_* macros
+ * jump to the nla_put_failure label on failure.
+ */
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+{
+       struct ovs_key_ethernet *eth_key;
+       struct nlattr *nla, *encap;
+
+       if (swkey->phy.priority)
+               NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+
+       if (swkey->phy.in_port != USHRT_MAX)
+               NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+
+       nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+       if (!nla)
+               goto nla_put_failure;
+       eth_key = nla_data(nla);
+       memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
+       memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+
+       /* VLAN-tagged keys nest everything after VLAN/ETHERTYPE inside an
+        * ENCAP attribute; a zero tci with ETH_P_8021Q encodes a truncated
+        * 802.1Q header (empty ENCAP).
+        */
+       if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+               NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
+               NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+               encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+               if (!swkey->eth.tci)
+                       goto unencap;
+       } else {
+               encap = NULL;
+       }
+
+       if (swkey->eth.type == htons(ETH_P_802_2))
+               goto unencap;
+
+       NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+
+       if (swkey->eth.type == htons(ETH_P_IP)) {
+               struct ovs_key_ipv4 *ipv4_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+               if (!nla)
+                       goto nla_put_failure;
+               ipv4_key = nla_data(nla);
+               ipv4_key->ipv4_src = swkey->ipv4.addr.src;
+               ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
+               ipv4_key->ipv4_proto = swkey->ip.proto;
+               ipv4_key->ipv4_tos = swkey->ip.tos;
+               ipv4_key->ipv4_ttl = swkey->ip.ttl;
+               ipv4_key->ipv4_frag = swkey->ip.frag;
+       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+               struct ovs_key_ipv6 *ipv6_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+               if (!nla)
+                       goto nla_put_failure;
+               ipv6_key = nla_data(nla);
+               memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+                               sizeof(ipv6_key->ipv6_src));
+               memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+                               sizeof(ipv6_key->ipv6_dst));
+               ipv6_key->ipv6_label = swkey->ipv6.label;
+               ipv6_key->ipv6_proto = swkey->ip.proto;
+               ipv6_key->ipv6_tclass = swkey->ip.tos;
+               ipv6_key->ipv6_hlimit = swkey->ip.ttl;
+               ipv6_key->ipv6_frag = swkey->ip.frag;
+       } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+               struct ovs_key_arp *arp_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+               if (!nla)
+                       goto nla_put_failure;
+               arp_key = nla_data(nla);
+               memset(arp_key, 0, sizeof(struct ovs_key_arp));
+               arp_key->arp_sip = swkey->ipv4.addr.src;
+               arp_key->arp_tip = swkey->ipv4.addr.dst;
+               arp_key->arp_op = htons(swkey->ip.proto);
+               memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
+               memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+       }
+
+       /* Transport-layer attributes; omitted for later fragments, which
+        * carry no transport header (mirrors ovs_flow_from_nlattrs()).
+        */
+       if ((swkey->eth.type == htons(ETH_P_IP) ||
+            swkey->eth.type == htons(ETH_P_IPV6)) &&
+            swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+               if (swkey->ip.proto == IPPROTO_TCP) {
+                       struct ovs_key_tcp *tcp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       tcp_key = nla_data(nla);
+                       if (swkey->eth.type == htons(ETH_P_IP)) {
+                               tcp_key->tcp_src = swkey->ipv4.tp.src;
+                               tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+                               tcp_key->tcp_src = swkey->ipv6.tp.src;
+                               tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+                       }
+               } else if (swkey->ip.proto == IPPROTO_UDP) {
+                       struct ovs_key_udp *udp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       udp_key = nla_data(nla);
+                       if (swkey->eth.type == htons(ETH_P_IP)) {
+                               udp_key->udp_src = swkey->ipv4.tp.src;
+                               udp_key->udp_dst = swkey->ipv4.tp.dst;
+                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+                               udp_key->udp_src = swkey->ipv6.tp.src;
+                               udp_key->udp_dst = swkey->ipv6.tp.dst;
+                       }
+               } else if (swkey->eth.type == htons(ETH_P_IP) &&
+                          swkey->ip.proto == IPPROTO_ICMP) {
+                       struct ovs_key_icmp *icmp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       icmp_key = nla_data(nla);
+                       icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
+                       icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+               } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+                          swkey->ip.proto == IPPROTO_ICMPV6) {
+                       struct ovs_key_icmpv6 *icmpv6_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+                                               sizeof(*icmpv6_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       icmpv6_key = nla_data(nla);
+                       icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
+                       icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+
+                       if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+                           icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+                               struct ovs_key_nd *nd_key;
+
+                               nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+                               if (!nla)
+                                       goto nla_put_failure;
+                               nd_key = nla_data(nla);
+                               memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+                                                       sizeof(nd_key->nd_target));
+                               memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
+                               memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+                       }
+               }
+       }
+
+unencap:
+       if (encap)
+               nla_nest_end(skb, encap);
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Initializes the flow module.
+ * Creates the slab cache from which struct sw_flow objects are allocated.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+                                       0, NULL);
+       if (flow_cache == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Uninitializes the flow module.
+ * NOTE(review): presumably all sw_flow objects must already be freed before
+ * this is called — kmem_cache_destroy() warns on live objects; confirm
+ * against the callers. */
+void ovs_flow_exit(void)
+{
+       kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
new file mode 100644 (file)
index 0000000..2747dc2
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_H
+#define FLOW_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+#include <net/inet_ecn.h>
+
+struct sk_buff;
+
+/* RCU-managed, variable-length block of netlink-formatted actions
+ * attached to a flow; replaced atomically via sw_flow->sf_acts. */
+struct sw_flow_actions {
+       struct rcu_head rcu;
+       u32 actions_len;        /* Total bytes used in actions[]. */
+       struct nlattr actions[];
+};
+
+/* Extracted header fields that a packet is matched on.
+ * NOTE(review): lookups hash/compare the key as raw bytes up to a
+ * protocol-dependent length (see ovs_flow_hash()/ovs_flow_tbl_lookup()),
+ * so member layout and zero-filling of unused union members matter. */
+struct sw_flow_key {
+       struct {
+               u32     priority;       /* Packet QoS priority. */
+               u16     in_port;        /* Input switch port (or USHRT_MAX). */
+       } phy;
+       struct {
+               u8     src[ETH_ALEN];   /* Ethernet source address. */
+               u8     dst[ETH_ALEN];   /* Ethernet destination address. */
+               __be16 tci;             /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+               __be16 type;            /* Ethernet frame type. */
+       } eth;
+       struct {
+               u8     proto;           /* IP protocol or lower 8 bits of ARP opcode. */
+               u8     tos;             /* IP ToS. */
+               u8     ttl;             /* IP TTL/hop limit. */
+               u8     frag;            /* One of OVS_FRAG_TYPE_*. */
+       } ip;
+       /* Interpreted according to eth.type and ip.proto. */
+       union {
+               struct {
+                       struct {
+                               __be32 src;     /* IP source address. */
+                               __be32 dst;     /* IP destination address. */
+                       } addr;
+                       union {
+                               struct {
+                                       __be16 src;             /* TCP/UDP source port. */
+                                       __be16 dst;             /* TCP/UDP destination port. */
+                               } tp;
+                               struct {
+                                       u8 sha[ETH_ALEN];       /* ARP source hardware address. */
+                                       u8 tha[ETH_ALEN];       /* ARP target hardware address. */
+                               } arp;
+                       };
+               } ipv4;
+               struct {
+                       struct {
+                               struct in6_addr src;    /* IPv6 source address. */
+                               struct in6_addr dst;    /* IPv6 destination address. */
+                       } addr;
+                       __be32 label;                   /* IPv6 flow label. */
+                       struct {
+                               __be16 src;             /* TCP/UDP source port. */
+                               __be16 dst;             /* TCP/UDP destination port. */
+                       } tp;
+                       struct {
+                               struct in6_addr target; /* ND target address. */
+                               u8 sll[ETH_ALEN];       /* ND source link layer address. */
+                               u8 tll[ETH_ALEN];       /* ND target link layer address. */
+                       } nd;
+               } ipv6;
+       };
+};
+
+/* A flow-table entry: match key, actions and per-flow statistics. */
+struct sw_flow {
+       struct rcu_head rcu;
+       /* Two nodes, presumably so the flow can be linked into both the
+        * old and new table during a rehash (see flow_table.node_ver) —
+        * TODO confirm against the table code. */
+       struct hlist_node hash_node[2];
+       u32 hash;
+
+       struct sw_flow_key key;
+       struct sw_flow_actions __rcu *sf_acts;
+
+       spinlock_t lock;        /* Lock for values below. */
+       unsigned long used;     /* Last used time (in jiffies). */
+       u64 packet_count;       /* Number of packets matched. */
+       u64 byte_count;         /* Number of bytes matched. */
+       u8 tcp_flags;           /* Union of seen TCP flags. */
+};
+
+/* Wire layout of an Ethernet/IPv4 ARP packet (__packed: parsed in place). */
+struct arp_eth_header {
+       __be16      ar_hrd;     /* format of hardware address   */
+       __be16      ar_pro;     /* format of protocol address   */
+       unsigned char   ar_hln; /* length of hardware address   */
+       unsigned char   ar_pln; /* length of protocol address   */
+       __be16      ar_op;      /* ARP opcode (command)     */
+
+       /* Ethernet+IPv4 specific members. */
+       unsigned char       ar_sha[ETH_ALEN];   /* sender hardware address  */
+       unsigned char       ar_sip[4];          /* sender IP address        */
+       unsigned char       ar_tha[ETH_ALEN];   /* target hardware address  */
+       unsigned char       ar_tip[4];          /* target IP address        */
+} __packed;
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
+void ovs_flow_free(struct sw_flow *flow);
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
+
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+                    int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
+
+/* Upper bound on the length of a nlattr-formatted flow key.  The longest
+ * nlattr-formatted flow key would be:
+ *
+ *                         struct  pad  nl hdr  total
+ *                         ------  ---  ------  -----
+ *  OVS_KEY_ATTR_PRIORITY      4    --     4      8
+ *  OVS_KEY_ATTR_IN_PORT       4    --     4      8
+ *  OVS_KEY_ATTR_ETHERNET     12    --     4     16
+ *  OVS_KEY_ATTR_8021Q         4    --     4      8
+ *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8
+ *  OVS_KEY_ATTR_IPV6         40    --     4     44
+ *  OVS_KEY_ATTR_ICMPV6        2     2     4      8
+ *  OVS_KEY_ATTR_ND           28    --     4     32
+ *  -------------------------------------------------
+ *  total                                       132
+ */
+#define FLOW_BUFSIZE 132
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+                     const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+                              const struct nlattr *);
+
+#define TBL_MIN_BUCKETS                1024
+
+/* Hash table of struct sw_flow, bucketed by ovs_flow_hash(). */
+struct flow_table {
+       struct flex_array *buckets;
+       unsigned int count, n_buckets;  /* entries / bucket array size */
+       struct rcu_head rcu;
+       int node_ver;           /* Which sw_flow.hash_node[] this table uses. */
+       u32 hash_seed;
+       bool keep_flows;        /* Don't free flows on table destroy. */
+};
+
+/* Number of flows currently in @table. */
+static inline int ovs_flow_tbl_count(struct flow_table *table)
+{
+       return table->count;
+}
+
+/* True once the table holds more flows than buckets (load factor > 1),
+ * i.e. it is worth calling ovs_flow_tbl_expand(). */
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
+{
+       return (table->count > table->n_buckets);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+                                   struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
+
+#endif /* flow.h */
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
new file mode 100644 (file)
index 0000000..8fc28b8
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* netdev_priv() area of an internal (OVS-created) network device:
+ * back-pointer to the owning vport. */
+struct internal_dev {
+       struct vport *vport;
+};
+
+/* Typed accessor for the private area of an internal device. */
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
+{
+       return netdev_priv(netdev);
+}
+
+/* ndo_get_stats64 callback: fill @stats from the vport's counters.
+ * This function is only called by the kernel network layer.*/
+static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
+                                                       struct rtnl_link_stats64 *stats)
+{
+       struct vport *vport = ovs_internal_dev_get_vport(netdev);
+       struct ovs_vport_stats vport_stats;
+
+       ovs_vport_get_stats(vport, &vport_stats);
+
+       /* The tx and rx stats need to be swapped because the
+        * switch and host OS have opposite perspectives. */
+       stats->rx_packets       = vport_stats.tx_packets;
+       stats->tx_packets       = vport_stats.rx_packets;
+       stats->rx_bytes         = vport_stats.tx_bytes;
+       stats->tx_bytes         = vport_stats.rx_bytes;
+       stats->rx_errors        = vport_stats.tx_errors;
+       stats->tx_errors        = vport_stats.rx_errors;
+       stats->rx_dropped       = vport_stats.tx_dropped;
+       stats->tx_dropped       = vport_stats.rx_dropped;
+
+       return stats;
+}
+
+/* ndo_set_mac_address callback: reject multicast/zero addresses,
+ * otherwise copy the new MAC into the device. */
+static int internal_dev_mac_addr(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+/* ndo_start_xmit: a packet sent by the host stack on an internal port
+ * enters the datapath as a received packet.  Called with rcu_read_lock_bh. */
+static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       /* ovs_vport_receive() runs under plain rcu_read_lock, which the
+        * _bh variant held here does not provide — take it explicitly. */
+       rcu_read_lock();
+       ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
+       rcu_read_unlock();
+       return 0;       /* == NETDEV_TX_OK */
+}
+
+/* ndo_open: allow transmissions. */
+static int internal_dev_open(struct net_device *netdev)
+{
+       netif_start_queue(netdev);
+       return 0;
+}
+
+/* ndo_stop: stop transmissions. */
+static int internal_dev_stop(struct net_device *netdev)
+{
+       netif_stop_queue(netdev);
+       return 0;
+}
+
+/* ethtool get_drvinfo: report only the driver name. */
+static void internal_dev_getinfo(struct net_device *netdev,
+                                struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, "openvswitch");
+}
+
+/* Minimal ethtool support: driver name and link state. */
+static const struct ethtool_ops internal_dev_ethtool_ops = {
+       .get_drvinfo    = internal_dev_getinfo,
+       .get_link       = ethtool_op_get_link,
+};
+
+/* ndo_change_mtu: accept anything >= 68, the minimum IPv4 MTU (RFC 791). */
+static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       if (new_mtu < 68)
+               return -EINVAL;
+
+       netdev->mtu = new_mtu;
+       return 0;
+}
+
+/* dev->destructor (set in do_setup): runs when the netdev is released
+ * after unregistration; frees both the vport and the netdev itself. */
+static void internal_dev_destructor(struct net_device *dev)
+{
+       struct vport *vport = ovs_internal_dev_get_vport(dev);
+
+       ovs_vport_free(vport);
+       free_netdev(dev);
+}
+
+/* net_device callbacks for internal ports. */
+static const struct net_device_ops internal_dev_netdev_ops = {
+       .ndo_open = internal_dev_open,
+       .ndo_stop = internal_dev_stop,
+       .ndo_start_xmit = internal_dev_xmit,
+       .ndo_set_mac_address = internal_dev_mac_addr,
+       .ndo_change_mtu = internal_dev_change_mtu,
+       .ndo_get_stats64 = internal_dev_get_stats,
+};
+
+/* alloc_netdev() setup callback: configure an internal device as an
+ * Ethernet device owned by the datapath. */
+static void do_setup(struct net_device *netdev)
+{
+       ether_setup(netdev);
+
+       netdev->netdev_ops = &internal_dev_netdev_ops;
+
+       /* skbs handed to ndo_start_xmit are consumed by the datapath,
+        * so they must not be shared (e.g. with AF_PACKET taps). */
+       netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       netdev->destructor = internal_dev_destructor;
+       SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+       /* No real queueing happens on an internal port. */
+       netdev->tx_queue_len = 0;
+
+       netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
+                               NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+
+       /* VLAN acceleration on TX; hw_features excludes LLTX because it
+        * is not user-toggleable. */
+       netdev->vlan_features = netdev->features;
+       netdev->features |= NETIF_F_HW_VLAN_TX;
+       netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+       random_ether_addr(netdev->dev_addr);
+}
+
+/* vport_ops->create for internal ports: allocate the vport and a backing
+ * net_device named parms->name, and register the device.
+ * Called with RTNL held (register_netdevice() requires it).
+ * Returns the new vport or an ERR_PTR. */
+static struct vport *internal_dev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       struct netdev_vport *netdev_vport;
+       struct internal_dev *internal_dev;
+       int err;
+
+       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+                               &ovs_internal_vport_ops, parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               goto error;
+       }
+
+       netdev_vport = netdev_vport_priv(vport);
+
+       netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+                                        parms->name, do_setup);
+       if (!netdev_vport->dev) {
+               err = -ENOMEM;
+               goto error_free_vport;
+       }
+
+       /* Link the device's private area back to its vport. */
+       internal_dev = internal_dev_priv(netdev_vport->dev);
+       internal_dev->vport = vport;
+
+       err = register_netdevice(netdev_vport->dev);
+       if (err)
+               goto error_free_netdev;
+
+       dev_set_promiscuity(netdev_vport->dev, 1);
+       netif_start_queue(netdev_vport->dev);
+
+       return vport;
+
+       /* Unwind in reverse order of acquisition. */
+error_free_netdev:
+       free_netdev(netdev_vport->dev);
+error_free_vport:
+       ovs_vport_free(vport);
+error:
+       return ERR_PTR(err);
+}
+
+/* vport_ops->destroy for internal ports.  The vport itself is freed
+ * later by internal_dev_destructor() when the netdev is released. */
+static void internal_dev_destroy(struct vport *vport)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+       netif_stop_queue(netdev_vport->dev);
+       dev_set_promiscuity(netdev_vport->dev, -1);
+
+       /* unregister_netdevice() waits for an RCU grace period. */
+       unregister_netdevice(netdev_vport->dev);
+}
+
+/* vport_ops->send for internal ports: despite the name, this is a packet
+ * leaving the datapath — inject it into the host network stack.
+ * Returns the number of bytes delivered. */
+static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+{
+       struct net_device *netdev = netdev_vport_priv(vport)->dev;
+       int len;
+
+       /* Record the length before eth_type_trans() pulls the header. */
+       len = skb->len;
+       skb->dev = netdev;
+       skb->pkt_type = PACKET_HOST;
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       netif_rx(skb);
+
+       return len;
+}
+
+/* vport implementation backed by an OVS-created net_device;
+ * .send delivers packets into the local network stack. */
+const struct vport_ops ovs_internal_vport_ops = {
+       .type           = OVS_VPORT_TYPE_INTERNAL,
+       .create         = internal_dev_create,
+       .destroy        = internal_dev_destroy,
+       .get_name       = ovs_netdev_get_name,
+       .get_ifindex    = ovs_netdev_get_ifindex,
+       .send           = internal_dev_recv,
+};
+
+/* Returns nonzero if @netdev is an OVS internal device (identified by
+ * its netdev_ops pointer). */
+int ovs_is_internal_dev(const struct net_device *netdev)
+{
+       return netdev->netdev_ops == &internal_dev_netdev_ops;
+}
+
+/* Returns the vport behind an internal device, or NULL if @netdev is
+ * not an OVS internal device. */
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
+{
+       if (!ovs_is_internal_dev(netdev))
+               return NULL;
+
+       return internal_dev_priv(netdev)->vport;
+}
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
new file mode 100644 (file)
index 0000000..3454447
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_INTERNAL_DEV_H
+#define VPORT_INTERNAL_DEV_H 1
+
+#include "datapath.h"
+#include "vport.h"
+
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
+
+#endif /* vport-internal_dev.h */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
new file mode 100644 (file)
index 0000000..c1068ae
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if_arp.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/llc.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+#include <net/llc.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* Hand a frame received on an attached device to the datapath.
+ * Must be called with rcu_read_lock. */
+static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+{
+       /* Port may have been detached between handler registration and
+        * this packet arriving. */
+       if (unlikely(!vport)) {
+               kfree_skb(skb);
+               return;
+       }
+
+       /* Make our own copy of the packet.  Otherwise we will mangle the
+        * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
+        * (No one comes after us: our rx handler returns
+        * RX_HANDLER_CONSUMED.) */
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return;
+
+       /* Restore the Ethernet header that eth_type_trans() pulled, so
+        * flow extraction sees the full frame. */
+       skb_push(skb, ETH_HLEN);
+       ovs_vport_receive(vport, skb);
+}
+
+/* rx_handler installed on attached devices (netdev_rx_handler_register).
+ * Called with rcu_read_lock and bottom-halves disabled. */
+static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct vport *vport;
+
+       /* Let looped-back packets continue up the normal stack. */
+       if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+               return RX_HANDLER_PASS;
+
+       vport = ovs_netdev_get_vport(skb->dev);
+
+       netdev_port_receive(vport, skb);
+
+       return RX_HANDLER_CONSUMED;
+}
+
+/* vport_ops->create for netdev ports: attach an existing network device
+ * named parms->name to the datapath.  Called with RTNL held.
+ * Returns the new vport or an ERR_PTR. */
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       struct netdev_vport *netdev_vport;
+       int err;
+
+       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+                               &ovs_netdev_vport_ops, parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               goto error;
+       }
+
+       netdev_vport = netdev_vport_priv(vport);
+
+       /* Takes a reference on the device; dropped in netdev_destroy()
+        * or on the error paths below. */
+       netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+       if (!netdev_vport->dev) {
+               err = -ENODEV;
+               goto error_free_vport;
+       }
+
+       /* Only plain (non-loopback, non-internal) Ethernet devices may
+        * be attached. */
+       if (netdev_vport->dev->flags & IFF_LOOPBACK ||
+           netdev_vport->dev->type != ARPHRD_ETHER ||
+           ovs_is_internal_dev(netdev_vport->dev)) {
+               err = -EINVAL;
+               goto error_put;
+       }
+
+       /* Fails with -EBUSY if another rx handler (e.g. a bridge) already
+        * owns the device. */
+       err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+                                        vport);
+       if (err)
+               goto error_put;
+
+       dev_set_promiscuity(netdev_vport->dev, 1);
+       netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+
+       return vport;
+
+error_put:
+       dev_put(netdev_vport->dev);
+error_free_vport:
+       ovs_vport_free(vport);
+error:
+       return ERR_PTR(err);
+}
+
+/* vport_ops->destroy for netdev ports: detach from the device and
+ * release it. */
+static void netdev_destroy(struct vport *vport)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+       netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+       netdev_rx_handler_unregister(netdev_vport->dev);
+       dev_set_promiscuity(netdev_vport->dev, -1);
+
+       /* Wait for in-flight netdev_frame_hook() callers to finish before
+        * dropping our device reference and freeing the vport. */
+       synchronize_rcu();
+
+       dev_put(netdev_vport->dev);
+       ovs_vport_free(vport);
+}
+
+/* vport_ops->get_name: the attached device's interface name. */
+const char *ovs_netdev_get_name(const struct vport *vport)
+{
+       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       return netdev_vport->dev->name;
+}
+
+/* vport_ops->get_ifindex: the attached device's ifindex. */
+int ovs_netdev_get_ifindex(const struct vport *vport)
+{
+       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       return netdev_vport->dev->ifindex;
+}
+
+/* L3 payload length of @skb for MTU comparison: strip the Ethernet
+ * header and, for 802.1Q frames, one VLAN tag.
+ * NOTE(review): only a single tag is accounted for. */
+static unsigned packet_length(const struct sk_buff *skb)
+{
+       unsigned length = skb->len - ETH_HLEN;
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               length -= VLAN_HLEN;
+
+       return length;
+}
+
+/* vport_ops->send for netdev ports: transmit @skb on the attached device.
+ * Returns the number of bytes sent, or 0 if the packet was dropped
+ * (over-MTU non-GSO frame, or an LRO skb we must not forward). */
+static int netdev_send(struct vport *vport, struct sk_buff *skb)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       int mtu = netdev_vport->dev->mtu;
+       int len;
+
+       if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+               if (net_ratelimit())
+                       pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+                               ovs_dp_name(vport->dp), packet_length(skb), mtu);
+               goto error;
+       }
+
+       if (unlikely(skb_warn_if_lro(skb)))
+               goto error;
+
+       /* Capture the length now; dev_queue_xmit() consumes the skb. */
+       skb->dev = netdev_vport->dev;
+       len = skb->len;
+       dev_queue_xmit(skb);
+
+       return len;
+
+error:
+       kfree_skb(skb);
+       ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+       return 0;
+}
+
+/* Returns null if this device is not attached to a datapath.
+ * rcu_dereference_rtnl(): callers must hold either rcu_read_lock or RTNL. */
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
+{
+       /* rx_handler_data was set to the vport in netdev_create(). */
+       if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
+               return (struct vport *)
+                       rcu_dereference_rtnl(dev->rx_handler_data);
+       else
+               return NULL;
+}
+
+/* vport implementation that attaches an existing network device. */
+const struct vport_ops ovs_netdev_vport_ops = {
+       .type           = OVS_VPORT_TYPE_NETDEV,
+       .create         = netdev_create,
+       .destroy        = netdev_destroy,
+       .get_name       = ovs_netdev_get_name,
+       .get_ifindex    = ovs_netdev_get_ifindex,
+       .send           = netdev_send,
+};
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
new file mode 100644 (file)
index 0000000..fd9b008
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_NETDEV_H
+#define VPORT_NETDEV_H 1
+
+#include <linux/netdevice.h>
+
+#include "vport.h"
+
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
+
+/* Private data of netdev-backed vports: the attached device
+ * (referenced; see netdev_create()/netdev_destroy()). */
+struct netdev_vport {
+       struct net_device *dev;
+};
+
+/* Typed accessor for a netdev vport's private area. */
+static inline struct netdev_vport *
+netdev_vport_priv(const struct vport *vport)
+{
+       return vport_priv(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+
+#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
new file mode 100644 (file)
index 0000000..7f0ef37
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/dcache.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include "vport.h"
+#include "vport-internal_dev.h"
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the bottom of vport.h. */
+static const struct vport_ops *vport_ops_list[] = {
+       &ovs_netdev_vport_ops,
+       &ovs_internal_vport_ops,
+};
+
+/* Protected by RCU read lock for reading, RTNL lock for writing. */
+static struct hlist_head *dev_table;
+#define VPORT_HASH_BUCKETS 1024
+
+/**
+ *     ovs_vport_init - initialize vport subsystem
+ *
+ * Called at module load time to initialize the vport subsystem.
+ * Returns zero on success or -ENOMEM.
+ */
+int ovs_vport_init(void)
+{
+       /* Zeroed bucket array for the name -> vport hash table. */
+       dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+                           GFP_KERNEL);
+       if (!dev_table)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ *     ovs_vport_exit - shutdown vport subsystem
+ *
+ * Called at module exit time to shutdown the vport subsystem.
+ * All vports must already have been removed.
+ */
+void ovs_vport_exit(void)
+{
+       kfree(dev_table);
+}
+
+/* Hash bucket of dev_table for port name @name.
+ * VPORT_HASH_BUCKETS is a power of two, so masking is a valid modulo. */
+static struct hlist_head *hash_bucket(const char *name)
+{
+       unsigned int hash = full_name_hash(name, strlen(name));
+       return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
+}
+
+/**
+ *     ovs_vport_locate - find a port that has already been created
+ *
+ * @name: name of port to find
+ *
+ * Returns the vport, or %NULL if no port of that name exists.
+ * Must be called with RTNL or RCU read lock.
+ */
+struct vport *ovs_vport_locate(const char *name)
+{
+       struct hlist_head *bucket = hash_bucket(name);
+       struct vport *vport;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+               if (!strcmp(name, vport->ops->get_name(vport)))
+                       return vport;
+
+       return NULL;
+}
+
+/**
+ *     ovs_vport_alloc - allocate and initialize new vport
+ *
+ * @priv_size: Size of private data area to allocate.
+ * @ops: vport device ops
+ * @parms: datapath, port number and upcall pid for the new port
+ *
+ * Allocate and initialize a new vport defined by @ops.  The vport will contain
+ * a private data area of size @priv_size that can be accessed using
+ * vport_priv().  vports that are no longer needed should be released with
+ * vport_free().  Returns the vport or an ERR_PTR on allocation failure.
+ */
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+                         const struct vport_parms *parms)
+{
+       struct vport *vport;
+       size_t alloc_size;
+
+       /* Pad so the private area following the vport is suitably aligned
+        * for any type (VPORT_ALIGN). */
+       alloc_size = sizeof(struct vport);
+       if (priv_size) {
+               alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
+               alloc_size += priv_size;
+       }
+
+       vport = kzalloc(alloc_size, GFP_KERNEL);
+       if (!vport)
+               return ERR_PTR(-ENOMEM);
+
+       vport->dp = parms->dp;
+       vport->port_no = parms->port_no;
+       vport->upcall_pid = parms->upcall_pid;
+       vport->ops = ops;
+
+       vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
+       if (!vport->percpu_stats) {
+               kfree(vport);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       spin_lock_init(&vport->stats_lock);
+
+       return vport;
+}
+
+/**
+ *     ovs_vport_free - uninitialize and free vport
+ *
+ * @vport: vport to free
+ *
+ * Frees a vport allocated with vport_alloc() when it is no longer needed.
+ *
+ * The caller must ensure that an RCU grace period has passed since the last
+ * time @vport was in a datapath.
+ */
+void ovs_vport_free(struct vport *vport)
+{
+       free_percpu(vport->percpu_stats);
+       kfree(vport);
+}
+
+/**
+ *     ovs_vport_add - add vport device (for kernel callers)
+ *
+ * @parms: Information about new vport.
+ *
+ * Creates a new vport with the specified configuration (which is dependent on
+ * device type).  RTNL lock must be held.  Returns the vport or an ERR_PTR.
+ */
+struct vport *ovs_vport_add(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       int err = 0;
+       int i;
+
+       ASSERT_RTNL();
+
+       /* Dispatch to the vport implementation matching parms->type. */
+       for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
+               if (vport_ops_list[i]->type == parms->type) {
+                       vport = vport_ops_list[i]->create(parms);
+                       if (IS_ERR(vport)) {
+                               err = PTR_ERR(vport);
+                               goto out;
+                       }
+
+                       /* Make the port findable by ovs_vport_locate(). */
+                       hlist_add_head_rcu(&vport->hash_node,
+                                          hash_bucket(vport->ops->get_name(vport)));
+                       return vport;
+               }
+       }
+
+       /* No implementation registered for this port type. */
+       err = -EAFNOSUPPORT;
+
+out:
+       return ERR_PTR(err);
+}
+
+/**
+ *     ovs_vport_set_options - modify existing vport device (for kernel callers)
+ *
+ * @vport: vport to modify.
+ * @options: New configuration.
+ *
+ * Modifies an existing device with the specified configuration (which is
+ * dependent on device type).  Returns -EOPNOTSUPP if the vport type does
+ * not support options.  RTNL lock must be held.
+ */
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
+{
+       ASSERT_RTNL();
+
+       if (!vport->ops->set_options)
+               return -EOPNOTSUPP;
+       return vport->ops->set_options(vport, options);
+}
+
+/**
+ *     ovs_vport_del - delete existing vport device
+ *
+ * @vport: vport to delete.
+ *
+ * Detaches @vport from its datapath and destroys it.  It is possible to fail
+ * for reasons such as lack of memory.  RTNL lock must be held.
+ */
+void ovs_vport_del(struct vport *vport)
+{
+       ASSERT_RTNL();
+
+       /* Unlink from dev_table first so new lookups cannot find it. */
+       hlist_del_rcu(&vport->hash_node);
+
+       vport->ops->destroy(vport);
+}
+
+/**
+ *     ovs_vport_get_stats - retrieve device stats
+ *
+ * @vport: vport from which to retrieve the stats
+ * @stats: location to store stats
+ *
+ * Retrieves transmit, receive, and error stats for the given device.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+{
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       /* We potentially have 2 sources of stats that need to be combined:
+        * those we have collected (split into err_stats and percpu_stats) from
+        * set_stats() and device error stats from netdev->get_stats() (for
+        * errors that happen  downstream and therefore aren't reported through
+        * our vport_record_error() function).
+        * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
+        * netdev-stats can be directly read over netlink-ioctl.
+        */
+
+       spin_lock_bh(&vport->stats_lock);
+
+       stats->rx_errors        = vport->err_stats.rx_errors;
+       stats->tx_errors        = vport->err_stats.tx_errors;
+       stats->tx_dropped       = vport->err_stats.tx_dropped;
+       stats->rx_dropped       = vport->err_stats.rx_dropped;
+
+       spin_unlock_bh(&vport->stats_lock);
+
+       /* Sum the per-cpu packet/byte counters, re-reading each CPU's
+        * values under its u64_stats seqcount so no torn 64-bit value
+        * is observed on 32-bit hosts. */
+       for_each_possible_cpu(i) {
+               const struct vport_percpu_stats *percpu_stats;
+               struct vport_percpu_stats local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+               stats->rx_bytes         += local_stats.rx_bytes;
+               stats->rx_packets       += local_stats.rx_packets;
+               stats->tx_bytes         += local_stats.tx_bytes;
+               stats->tx_packets       += local_stats.tx_packets;
+       }
+}
+
+/**
+ *     ovs_vport_get_options - retrieve device options
+ *
+ * @vport: vport from which to retrieve the options.
+ * @skb: sk_buff where options should be appended.
+ *
+ * Retrieves the configuration of the given device, appending an
+ * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
+ * vport-specific attributes to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
+ * negative error code if a real error occurred.  If an error occurs, @skb is
+ * left unmodified.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+       struct nlattr *nla;
+
+       nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (vport->ops->get_options) {
+               int err = vport->ops->get_options(vport, skb);
+               if (err) {
+                       nla_nest_cancel(skb, nla);
+                       return err;
+               }
+       }
+
+       nla_nest_end(skb, nla);
+       return 0;
+}
+
+/**
+ *     ovs_vport_receive - pass up received packet to the datapath for processing
+ *
+ * @vport: vport that received the packet
+ * @skb: skb that was received
+ *
+ * Must be called with rcu_read_lock.  The packet cannot be shared and
+ * skb->data should point to the Ethernet header.  The caller must have already
+ * called compute_ip_summed() to initialize the checksumming fields.
+ */
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+{
+       struct vport_percpu_stats *stats;
+
+       stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+       u64_stats_update_begin(&stats->sync);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->sync);
+
+       ovs_dp_process_received_packet(vport, skb);
+}
+
+/**
+ *     ovs_vport_send - send a packet on a device
+ *
+ * @vport: vport on which to send the packet
+ * @skb: skb to send
+ *
+ * Sends the given packet and returns the length of data sent.  Either RTNL
+ * lock or rcu_read_lock must be held.
+ */
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+       int sent = vport->ops->send(vport, skb);
+
+       if (likely(sent)) {
+               struct vport_percpu_stats *stats;
+
+               stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               u64_stats_update_begin(&stats->sync);
+               stats->tx_packets++;
+               stats->tx_bytes += sent;
+               u64_stats_update_end(&stats->sync);
+       }
+       return sent;
+}
+
+/**
+ *     ovs_vport_record_error - indicate device error to generic stats layer
+ *
+ * @vport: vport that encountered the error
+ * @err_type: one of enum vport_err_type types to indicate the error type
+ *
+ * If using the vport generic stats layer indicate that an error of the given
+ * type has occured.
+ */
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+{
+       spin_lock(&vport->stats_lock);
+
+       switch (err_type) {
+       case VPORT_E_RX_DROPPED:
+               vport->err_stats.rx_dropped++;
+               break;
+
+       case VPORT_E_RX_ERROR:
+               vport->err_stats.rx_errors++;
+               break;
+
+       case VPORT_E_TX_DROPPED:
+               vport->err_stats.tx_dropped++;
+               break;
+
+       case VPORT_E_TX_ERROR:
+               vport->err_stats.tx_errors++;
+               break;
+       };
+
+       spin_unlock(&vport->stats_lock);
+}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
new file mode 100644 (file)
index 0000000..1960962
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_H
+#define VPORT_H 1
+
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/u64_stats_sync.h>
+
+#include "datapath.h"
+
+struct vport;
+struct vport_parms;
+
+/* The following definitions are for users of the vport subsytem: */
+
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
+
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
+
+struct vport *ovs_vport_locate(const char *name);
+
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
+
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_vport_send(struct vport *, struct sk_buff *);
+
+/* The following definitions are for implementers of vport devices: */
+
+struct vport_percpu_stats {
+       u64 rx_bytes;
+       u64 rx_packets;
+       u64 tx_bytes;
+       u64 tx_packets;
+       struct u64_stats_sync sync;
+};
+
+struct vport_err_stats {
+       u64 rx_dropped;
+       u64 rx_errors;
+       u64 tx_dropped;
+       u64 tx_errors;
+};
+
+/**
+ * struct vport - one port within a datapath
+ * @rcu: RCU callback head for deferred destruction.
+ * @port_no: Index into @dp's @ports array.
+ * @dp: Datapath to which this port belongs.
+ * @node: Element in @dp's @port_list.
+ * @upcall_pid: The Netlink port to use for packets received on this port that
+ * miss the flow table.
+ * @hash_node: Element in @dev_table hash table in vport.c.
+ * @ops: Class structure.
+ * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+ * @stats_lock: Protects @err_stats;
+ * @err_stats: Points to error statistics used and maintained by vport
+ */
+struct vport {
+       struct rcu_head rcu;
+       u16 port_no;
+       struct datapath *dp;
+       struct list_head node;
+       u32 upcall_pid;
+
+       struct hlist_node hash_node;
+       const struct vport_ops *ops;
+
+       struct vport_percpu_stats __percpu *percpu_stats;
+
+       spinlock_t stats_lock;
+       struct vport_err_stats err_stats;
+};
+
+/**
+ * struct vport_parms - parameters for creating a new vport
+ *
+ * @name: New vport's name.
+ * @type: New vport's type.
+ * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if
+ * none was supplied.
+ * @dp: New vport's datapath.
+ * @port_no: New vport's port number.
+ */
+struct vport_parms {
+       const char *name;
+       enum ovs_vport_type type;
+       struct nlattr *options;
+
+       /* For ovs_vport_alloc(). */
+       struct datapath *dp;
+       u16 port_no;
+       u32 upcall_pid;
+};
+
+/**
+ * struct vport_ops - definition of a type of virtual port
+ *
+ * @type: %OVS_VPORT_TYPE_* value for this type of virtual port.
+ * @create: Create a new vport configured as specified.  On success returns
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
+ * @destroy: Destroys a vport.  Must call vport_free() on the vport but not
+ * before an RCU grace period has elapsed.
+ * @set_options: Modify the configuration of an existing vport.  May be %NULL
+ * if modification is not supported.
+ * @get_options: Appends vport-specific attributes for the configuration of an
+ * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
+ * have any configuration.
+ * @get_name: Get the device's name.
+ * @get_config: Get the device's configuration.
+ * @get_ifindex: Get the system interface index associated with the device.
+ * May be null if the device does not have an ifindex.
+ * @send: Send a packet on the device.  Returns the length of the packet sent.
+ */
+struct vport_ops {
+       enum ovs_vport_type type;
+
+       /* Called with RTNL lock. */
+       struct vport *(*create)(const struct vport_parms *);
+       void (*destroy)(struct vport *);
+
+       int (*set_options)(struct vport *, struct nlattr *);
+       int (*get_options)(const struct vport *, struct sk_buff *);
+
+       /* Called with rcu_read_lock or RTNL lock. */
+       const char *(*get_name)(const struct vport *);
+       void (*get_config)(const struct vport *, void *);
+       int (*get_ifindex)(const struct vport *);
+
+       int (*send)(struct vport *, struct sk_buff *);
+};
+
+enum vport_err_type {
+       VPORT_E_RX_DROPPED,
+       VPORT_E_RX_ERROR,
+       VPORT_E_TX_DROPPED,
+       VPORT_E_TX_ERROR,
+};
+
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+                             const struct vport_parms *);
+void ovs_vport_free(struct vport *);
+
+#define VPORT_ALIGN 8
+
+/**
+ *     vport_priv - access private data area of vport
+ *
+ * @vport: vport to access
+ *
+ * If a nonzero size was passed in priv_size of vport_alloc() a private data
+ * area was allocated on creation.  This allows that area to be accessed and
+ * used for any purpose needed by the vport implementer.
+ */
+static inline void *vport_priv(const struct vport *vport)
+{
+       return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+}
+
+/**
+ *     vport_from_priv - lookup vport from private data pointer
+ *
+ * @priv: Start of private data area.
+ *
+ * It is sometimes useful to translate from a pointer to the private data
+ * area to the vport, such as in the case where the private data pointer is
+ * the result of a hash table lookup.  @priv must point to the start of the
+ * private data area.
+ */
+static inline struct vport *vport_from_priv(const void *priv)
+{
+       return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+}
+
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the top of vport.c. */
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+
+#endif /* vport.h */
index 82a6f34d39d012fb35d9a0d490503fcc2048e6e2..2dbb32b988c4d476406e212f0ff5632677e0501b 100644 (file)
@@ -1499,10 +1499,11 @@ retry:
 
        if (!skb) {
                size_t reserved = LL_RESERVED_SPACE(dev);
+               int tlen = dev->needed_tailroom;
                unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
 
                rcu_read_unlock();
-               skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+               skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
                if (skb == NULL)
                        return -ENOBUFS;
                /* FIXME: Save some space for broken drivers that write a hard
@@ -1630,8 +1631,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1762,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
@@ -1944,7 +1943,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                void *frame, struct net_device *dev, int size_max,
-               __be16 proto, unsigned char *addr)
+               __be16 proto, unsigned char *addr, int hlen)
 {
        union {
                struct tpacket_hdr *h1;
@@ -1978,7 +1977,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                return -EMSGSIZE;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
        data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2053,6 +2052,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        unsigned char *addr;
        int len_sum = 0;
        int status = 0;
+       int hlen, tlen;
 
        mutex_lock(&po->pg_vec_lock);
 
@@ -2101,16 +2101,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                }
 
                status = TP_STATUS_SEND_REQUEST;
+               hlen = LL_RESERVED_SPACE(dev);
+               tlen = dev->needed_tailroom;
                skb = sock_alloc_send_skb(&po->sk,
-                               LL_ALLOCATED_SPACE(dev)
-                               + sizeof(struct sockaddr_ll),
+                               hlen + tlen + sizeof(struct sockaddr_ll),
                                0, &err);
 
                if (unlikely(skb == NULL))
                        goto out_status;
 
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-                               addr);
+                               addr, hlen);
 
                if (unlikely(tp_len < 0)) {
                        if (po->tp_loss) {
@@ -2207,6 +2208,7 @@ static int packet_snd(struct socket *sock,
        int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
        unsigned short gso_type = 0;
+       int hlen, tlen;
 
        /*
         *      Get and verify the address.
@@ -2291,8 +2293,9 @@ static int packet_snd(struct socket *sock,
                goto out_unlock;
 
        err = -ENOBUFS;
-       skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
-                              LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;
@@ -2450,8 +2453,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
 {
        struct packet_sock *po = pkt_sk(sk);
 
-       if (po->fanout)
+       if (po->fanout) {
+               if (dev)
+                       dev_put(dev);
+
                return -EINVAL;
+       }
 
        lock_sock(sk);
 
index 2ba6e9fb4cbcd9554b6cf3eadb9088ca9e577ad7..9f60008740e32875fb80d64b070cdf7262366650 100644 (file)
@@ -534,6 +534,29 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
        return pipe_handler_send_created_ind(sk);
 }
 
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       struct pnpipehdr *hdr = pnp_hdr(skb);
+
+       if (hdr->error_code != PN_PIPE_NO_ERROR)
+               return -ECONNREFUSED;
+
+       return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+               NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+       struct pep_sock *pn = pep_sk(sk);
+
+       if (!pn_flow_safe(pn->tx_fc)) {
+               atomic_set(&pn->tx_credits, 1);
+               sk->sk_write_space(sk);
+       }
+       pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
 /* Queue an skb to an actively connected sock.
  * Socket lock must be held. */
 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -579,13 +602,25 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
                        sk->sk_state = TCP_CLOSE_WAIT;
                        break;
                }
+               if (pn->init_enable == PN_PIPE_DISABLE)
+                       sk->sk_state = TCP_SYN_RECV;
+               else {
+                       sk->sk_state = TCP_ESTABLISHED;
+                       pipe_start_flow_control(sk);
+               }
+               break;
 
-               sk->sk_state = TCP_ESTABLISHED;
-               if (!pn_flow_safe(pn->tx_fc)) {
-                       atomic_set(&pn->tx_credits, 1);
-                       sk->sk_write_space(sk);
+       case PNS_PEP_ENABLE_RESP:
+               if (sk->sk_state != TCP_SYN_SENT)
+                       break;
+
+               if (pep_enableresp_rcv(sk, skb)) {
+                       sk->sk_state = TCP_CLOSE_WAIT;
+                       break;
                }
-               pipe_grant_credits(sk, GFP_ATOMIC);
+
+               sk->sk_state = TCP_ESTABLISHED;
+               pipe_start_flow_control(sk);
                break;
 
        case PNS_PEP_DISCONNECT_RESP:
@@ -864,14 +899,32 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
        int err;
        u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
-       pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+       if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+               pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
        err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
-                                       PN_PIPE_ENABLE, data, 4);
+                               pn->init_enable, data, 4);
        if (err) {
                pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
                return err;
        }
+
        sk->sk_state = TCP_SYN_SENT;
+
+       return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+       int err;
+
+       err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+                               NULL, 0);
+       if (err)
+               return err;
+
+       sk->sk_state = TCP_SYN_SENT;
+
        return 0;
 }
 
@@ -879,11 +932,14 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
        struct pep_sock *pn = pep_sk(sk);
        int answ;
+       int ret = -ENOIOCTLCMD;
 
        switch (cmd) {
        case SIOCINQ:
-               if (sk->sk_state == TCP_LISTEN)
-                       return -EINVAL;
+               if (sk->sk_state == TCP_LISTEN) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                lock_sock(sk);
                if (sock_flag(sk, SOCK_URGINLINE) &&
@@ -894,10 +950,22 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
                else
                        answ = 0;
                release_sock(sk);
-               return put_user(answ, (int __user *)arg);
+               ret = put_user(answ, (int __user *)arg);
+               break;
+
+       case SIOCPNENABLEPIPE:
+               lock_sock(sk);
+               if (sk->sk_state == TCP_SYN_SENT)
+                       ret =  -EBUSY;
+               else if (sk->sk_state == TCP_ESTABLISHED)
+                       ret = -EISCONN;
+               else
+                       ret = pep_sock_enable(sk, NULL, 0);
+               release_sock(sk);
+               break;
        }
 
-       return -ENOIOCTLCMD;
+       return ret;
 }
 
 static int pep_init(struct sock *sk)
@@ -960,6 +1028,18 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
                }
                goto out_norel;
 
+       case PNPIPE_HANDLE:
+               if ((sk->sk_state == TCP_CLOSE) &&
+                       (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+                       pn->pipe_handle = val;
+               else
+                       err = -EINVAL;
+               break;
+
+       case PNPIPE_INITSTATE:
+               pn->init_enable = !!val;
+               break;
+
        default:
                err = -ENOPROTOOPT;
        }
@@ -995,6 +1075,10 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
                        return -EINVAL;
                break;
 
+       case PNPIPE_INITSTATE:
+               val = pn->init_enable;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
index 4cf6dc7910e4b0c13a2baf32447bd9000feb1f19..ec753b3ae72ade6005cecee012ec1e2759a7fb45 100644 (file)
@@ -9,7 +9,6 @@ config RDS
 
 config RDS_RDMA
        tristate "RDS over Infiniband and iWARP"
-       select LLIST
        depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
        ---help---
          Allow RDS to use Infiniband and iWARP as a transport.
index 2ebfe8d0e87367e9ec695de4d06648fd1f5d9a76..11da3018a85321739625f678cbf8009380b683ff 100644 (file)
@@ -36,12 +36,12 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
        if (blocked) {
                if (rfkill_data->reg_enabled) {
                        regulator_disable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = 0;
+                       rfkill_data->reg_enabled = false;
                }
        } else {
                if (!rfkill_data->reg_enabled) {
                        regulator_enable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = 1;
+                       rfkill_data->reg_enabled = true;
                }
        }
 
@@ -96,7 +96,7 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
 
        if (regulator_is_enabled(vcc)) {
                dev_dbg(&pdev->dev, "Regulator already enabled\n");
-               rfkill_data->reg_enabled = 1;
+               rfkill_data->reg_enabled = true;
        }
        rfkill_data->vcc = vcc;
        rfkill_data->rf_kill = rf_kill;
index f99cfce7ca9706665520ba86cf4b7feb75c726c7..c3126e864f3c4df01319c9bdea7eb6f91a259800 100644 (file)
@@ -195,7 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
                sp = rxrpc_skb(txb);
 
                if (sp->need_resend) {
-                       sp->need_resend = 0;
+                       sp->need_resend = false;
 
                        /* each Tx packet has a new serial number */
                        sp->hdr.serial =
@@ -216,7 +216,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
                }
 
                if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
@@ -265,7 +265,7 @@ static void rxrpc_resend_timer(struct rxrpc_call *call)
                if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
@@ -314,11 +314,11 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
 
                switch (sacks[loop]) {
                case RXRPC_ACK_TYPE_ACK:
-                       sp->need_resend = 0;
+                       sp->need_resend = false;
                        *p_txb |= 1;
                        break;
                case RXRPC_ACK_TYPE_NACK:
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        *p_txb &= ~1;
                        resend = 1;
                        break;
@@ -344,13 +344,13 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
 
                if (*p_txb & 1) {
                        /* packet must have been discarded */
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        *p_txb &= ~1;
                        resend |= 1;
                } else if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-                       sp->need_resend = 1;
+                       sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
index 43ea7de2fc8ee8e4cef2c006052d3f11f9f67167..4cba13e46ffd7e13868526ec5acaee5fdde005e2 100644 (file)
@@ -306,10 +306,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
        td->data_len = len;
 
        if (len > 0) {
-               td->data = kmalloc(len, GFP_KERNEL);
+               td->data = kmemdup(xdr, len, GFP_KERNEL);
                if (!td->data)
                        return -ENOMEM;
-               memcpy(td->data, xdr, len);
                len = (len + 3) & ~3;
                toklen -= len;
                xdr += len >> 2;
@@ -401,10 +400,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
        _debug("ticket len %u", len);
 
        if (len > 0) {
-               *_ticket = kmalloc(len, GFP_KERNEL);
+               *_ticket = kmemdup(xdr, len, GFP_KERNEL);
                if (!*_ticket)
                        return -ENOMEM;
-               memcpy(*_ticket, xdr, len);
                len = (len + 3) & ~3;
                toklen -= len;
                xdr += len >> 2;
index 338d793c71130f7546ac713c21a3cc01bae7f1e4..16ae88762d00dae80d0bfcfe0ec0b467372c62ce 100644 (file)
@@ -486,7 +486,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
        _proto("Tx DATA %%%u { #%u }",
               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
 
-       sp->need_resend = 0;
+       sp->need_resend = false;
        sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
        if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
                _debug("run timer");
@@ -508,7 +508,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        if (ret < 0) {
                _debug("need instant resend %d", ret);
-               sp->need_resend = 1;
+               sp->need_resend = true;
                rxrpc_instant_resend(call);
        }
 
index 7b582300d051dcb81765f07714b4e9d16e056a6a..1d8bd0dbcd1fff38ed191b89decb3f0afb0456af 100644 (file)
@@ -26,6 +26,8 @@
 #include <net/pkt_cls.h>
 #include <net/ip.h>
 #include <net/route.h>
+#include <net/flow_keys.h>
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -66,134 +68,37 @@ static inline u32 addr_fold(void *addr)
        return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
 }
 
-static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be32 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          saddr),
-                                         4, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         saddr.s6_addr32[3]),
-                                        4, &hdata);
-               break;
-       }
-
-       if (data)
-               return ntohl(*data);
+       if (flow->src)
+               return ntohl(flow->src);
        return addr_fold(skb->sk);
 }
 
-static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be32 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          daddr),
-                                         4, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         daddr.s6_addr32[3]),
-                                        4, &hdata);
-               break;
-       }
-
-       if (data)
-               return ntohl(*data);
+       if (flow->dst)
+               return ntohl(flow->dst);
        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
-static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __u8 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          protocol),
-                                         1, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         nexthdr),
-                                        1, &hdata);
-               break;
-       }
-       if (data)
-               return *data;
-       return 0;
+       return flow->ip_proto;
 }
 
-/* helper function to get either src or dst port */
-static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
-                                    __be16 *_port, int dst)
+static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be16 *port = NULL;
-       int poff;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP): {
-               struct iphdr *iph, _iph;
-
-               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-               if (!iph)
-                       break;
-               if (ip_is_fragment(iph))
-                       break;
-               poff = proto_ports_offset(iph->protocol);
-               if (poff >= 0)
-                       port = skb_header_pointer(skb,
-                                       nhoff + iph->ihl * 4 + poff + dst,
-                                       sizeof(*_port), _port);
-               break;
-       }
-       case htons(ETH_P_IPV6): {
-               struct ipv6hdr *iph, _iph;
-
-               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-               if (!iph)
-                       break;
-               poff = proto_ports_offset(iph->nexthdr);
-               if (poff >= 0)
-                       port = skb_header_pointer(skb,
-                                       nhoff + sizeof(*iph) + poff + dst,
-                                       sizeof(*_port), _port);
-               break;
-       }
-       }
-
-       return port;
-}
-
-static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
-{
-       __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
-
-       if (port)
-               return ntohs(*port);
+       if (flow->ports)
+               return ntohs(flow->port16[0]);
 
        return addr_fold(skb->sk);
 }
 
-static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
-
-       if (port)
-               return ntohs(*port);
+       if (flow->ports)
+               return ntohs(flow->port16[1]);
 
        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
@@ -239,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 })
 #endif
 
-static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        switch (skb->protocol) {
        case htons(ETH_P_IP):
@@ -248,10 +153,10 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
                return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
        }
 fallback:
-       return flow_get_src(skb, nhoff);
+       return flow_get_src(skb, flow);
 }
 
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        switch (skb->protocol) {
        case htons(ETH_P_IP):
@@ -260,21 +165,21 @@ static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
                return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
        }
 fallback:
-       return flow_get_dst(skb, nhoff);
+       return flow_get_dst(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
-       return flow_get_proto_src(skb, nhoff);
+       return flow_get_proto_src(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
-       return flow_get_proto_dst(skb, nhoff);
+       return flow_get_proto_dst(skb, flow);
 }
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -314,21 +219,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
        return skb_get_rxhash(skb);
 }
 
-static u32 flow_key_get(struct sk_buff *skb, int key)
+static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
 {
-       int nhoff = skb_network_offset(skb);
-
        switch (key) {
        case FLOW_KEY_SRC:
-               return flow_get_src(skb, nhoff);
+               return flow_get_src(skb, flow);
        case FLOW_KEY_DST:
-               return flow_get_dst(skb, nhoff);
+               return flow_get_dst(skb, flow);
        case FLOW_KEY_PROTO:
-               return flow_get_proto(skb, nhoff);
+               return flow_get_proto(skb, flow);
        case FLOW_KEY_PROTO_SRC:
-               return flow_get_proto_src(skb, nhoff);
+               return flow_get_proto_src(skb, flow);
        case FLOW_KEY_PROTO_DST:
-               return flow_get_proto_dst(skb, nhoff);
+               return flow_get_proto_dst(skb, flow);
        case FLOW_KEY_IIF:
                return flow_get_iif(skb);
        case FLOW_KEY_PRIORITY:
@@ -338,13 +241,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
        case FLOW_KEY_NFCT:
                return flow_get_nfct(skb);
        case FLOW_KEY_NFCT_SRC:
-               return flow_get_nfct_src(skb, nhoff);
+               return flow_get_nfct_src(skb, flow);
        case FLOW_KEY_NFCT_DST:
-               return flow_get_nfct_dst(skb, nhoff);
+               return flow_get_nfct_dst(skb, flow);
        case FLOW_KEY_NFCT_PROTO_SRC:
-               return flow_get_nfct_proto_src(skb, nhoff);
+               return flow_get_nfct_proto_src(skb, flow);
        case FLOW_KEY_NFCT_PROTO_DST:
-               return flow_get_nfct_proto_dst(skb, nhoff);
+               return flow_get_nfct_proto_dst(skb, flow);
        case FLOW_KEY_RTCLASSID:
                return flow_get_rtclassid(skb);
        case FLOW_KEY_SKUID:
@@ -361,6 +264,16 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
        }
 }
 
+#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |                \
+                         (1 << FLOW_KEY_DST) |                 \
+                         (1 << FLOW_KEY_PROTO) |               \
+                         (1 << FLOW_KEY_PROTO_SRC) |           \
+                         (1 << FLOW_KEY_PROTO_DST) |           \
+                         (1 << FLOW_KEY_NFCT_SRC) |            \
+                         (1 << FLOW_KEY_NFCT_DST) |            \
+                         (1 << FLOW_KEY_NFCT_PROTO_SRC) |      \
+                         (1 << FLOW_KEY_NFCT_PROTO_DST))
+
 static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
@@ -372,17 +285,20 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        int r;
 
        list_for_each_entry(f, &head->filters, list) {
-               u32 keys[f->nkeys];
+               u32 keys[FLOW_KEY_MAX + 1];
+               struct flow_keys flow_keys;
 
                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
                        continue;
 
                keymask = f->keymask;
+               if (keymask & FLOW_KEYS_NEEDED)
+                       skb_flow_dissect(skb, &flow_keys);
 
                for (n = 0; n < f->nkeys; n++) {
                        key = ffs(keymask) - 1;
                        keymask &= ~(1 << key);
-                       keys[n] = flow_key_get(skb, key);
+                       keys[n] = flow_key_get(skb, key, &flow_keys);
                }
 
                if (f->mode == FLOW_MODE_HASH)
index dca6c1a576f77e70ee7cab5c187946c96a7219d2..3d8981fde3019ae09130528c3f08e4d632385f2f 100644 (file)
@@ -618,20 +618,24 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 }
 EXPORT_SYMBOL(qdisc_class_hash_remove);
 
-/* Allocate an unique handle from space managed by kernel */
-
+/* Allocate a unique handle from space managed by kernel
+ * Possible range is [8000-FFFF]:0000 (0x8000 values)
+ */
 static u32 qdisc_alloc_handle(struct net_device *dev)
 {
-       int i = 0x10000;
+       int i = 0x8000;
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 
        do {
                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
-       } while (qdisc_lookup(dev, autohandle) && --i > 0);
+               if (!qdisc_lookup(dev, autohandle))
+                       return autohandle;
+               cond_resched();
+       } while (--i > 0);
 
-       return i > 0 ? autohandle : 0;
+       return 0;
 }
 
 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
index 3422b25df9e46d8149ea0591ddc1625740b64bb6..bef00acb8bd2e9f392f433c9d7579a5b02d80ffb 100644 (file)
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
+#include <net/flow_keys.h>
 
 /*
    CHOKe stateless AQM for fair bandwidth allocation
@@ -142,85 +139,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
-/*
- * Compare flow of two packets
- *  Returns true only if source and destination address and port match.
- *          false for special cases
- */
-static bool choke_match_flow(struct sk_buff *skb1,
-                            struct sk_buff *skb2)
-{
-       int off1, off2, poff;
-       const u32 *ports1, *ports2;
-       u8 ip_proto;
-       __u32 hash1;
-
-       if (skb1->protocol != skb2->protocol)
-               return false;
-
-       /* Use hash value as quick check
-        * Assumes that __skb_get_rxhash makes IP header and ports linear
-        */
-       hash1 = skb_get_rxhash(skb1);
-       if (!hash1 || hash1 != skb_get_rxhash(skb2))
-               return false;
-
-       /* Probably match, but be sure to avoid hash collisions */
-       off1 = skb_network_offset(skb1);
-       off2 = skb_network_offset(skb2);
-
-       switch (skb1->protocol) {
-       case __constant_htons(ETH_P_IP): {
-               const struct iphdr *ip1, *ip2;
-
-               ip1 = (const struct iphdr *) (skb1->data + off1);
-               ip2 = (const struct iphdr *) (skb2->data + off2);
-
-               ip_proto = ip1->protocol;
-               if (ip_proto != ip2->protocol ||
-                   ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
-                       return false;
-
-               if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
-                       ip_proto = 0;
-               off1 += ip1->ihl * 4;
-               off2 += ip2->ihl * 4;
-               break;
-       }
-
-       case __constant_htons(ETH_P_IPV6): {
-               const struct ipv6hdr *ip1, *ip2;
-
-               ip1 = (const struct ipv6hdr *) (skb1->data + off1);
-               ip2 = (const struct ipv6hdr *) (skb2->data + off2);
-
-               ip_proto = ip1->nexthdr;
-               if (ip_proto != ip2->nexthdr ||
-                   ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
-                   ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
-                       return false;
-               off1 += 40;
-               off2 += 40;
-       }
-
-       default: /* Maybe compare MAC header here? */
-               return false;
-       }
-
-       poff = proto_ports_offset(ip_proto);
-       if (poff < 0)
-               return true;
-
-       off1 += poff;
-       off2 += poff;
-
-       ports1 = (__force u32 *)(skb1->data + off1);
-       ports2 = (__force u32 *)(skb2->data + off2);
-       return *ports1 == *ports2;
-}
-
 struct choke_skb_cb {
-       u16 classid;
+       u16                     classid;
+       u8                      keys_valid;
+       struct flow_keys        keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -240,6 +162,32 @@ static u16 choke_get_classid(const struct sk_buff *skb)
        return choke_skb_cb(skb)->classid;
 }
 
+/*
+ * Compare flow of two packets
+ *  Returns true only if source and destination address and port match.
+ *          false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+                            struct sk_buff *skb2)
+{
+       if (skb1->protocol != skb2->protocol)
+               return false;
+
+       if (!choke_skb_cb(skb1)->keys_valid) {
+               choke_skb_cb(skb1)->keys_valid = 1;
+               skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+       }
+
+       if (!choke_skb_cb(skb2)->keys_valid) {
+               choke_skb_cb(skb2)->keys_valid = 1;
+               skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+       }
+
+       return !memcmp(&choke_skb_cb(skb1)->keys,
+                      &choke_skb_cb(skb2)->keys,
+                      sizeof(struct flow_keys));
+}
+
 /*
  * Classify flow using either:
  *  1. pre-existing classification result in skb
@@ -326,6 +274,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                        goto other_drop;        /* Packet was eaten by filter */
        }
 
+       choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        p->qavg = red_calc_qavg(p, sch->q.qlen);
        if (red_is_idling(p))
@@ -445,6 +394,7 @@ static void choke_reset(struct Qdisc *sch)
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
+       [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
 };
 
 
@@ -466,6 +416,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -478,6 +429,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -527,7 +480,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_CHOKE_STAB]));
+                     nla_data(tb[TCA_CHOKE_STAB]),
+                     max_P);
 
        if (q->head == q->tail)
                red_end_of_idle_period(&q->parms);
@@ -561,6 +515,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index 69fca2798804f29f5cd3cbdadd139fdc6801e9d5..67fc573e013a063b524fa8b85512a28c887689f9 100644 (file)
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-               if (!netif_tx_queue_frozen_or_stopped(txq)) {
+               if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        spin_unlock(root_lock);
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_tx_queue_frozen_or_stopped(txq))
+       if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                ret = dev_requeue_skb(skb, q);
        }
 
-       if (ret && netif_tx_queue_frozen_or_stopped(txq))
+       if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;
 
        return ret;
@@ -242,10 +242,11 @@ static void dev_watchdog(unsigned long arg)
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
-                               if (netif_tx_queue_stopped(txq) &&
+                               if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
+                                       txq->trans_timeout++;
                                        break;
                                }
                        }
index b9493a09a870343fe90444bea4b1fac547d42e46..53204de71c39fa07ea23dddf71ed4896e5812154 100644 (file)
@@ -34,7 +34,7 @@ struct gred_sched;
 
 struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
-       u32             DP;             /* the drop pramaters */
+       u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
        u32             packetsin;      /* packets seen on virtualQ so far*/
        u32             backlog;        /* bytes on the virtualQ */
@@ -379,18 +379,20 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
-                                struct tc_gred_qopt *ctl, int prio, u8 *stab)
+                                struct tc_gred_qopt *ctl, int prio,
+                                u8 *stab, u32 max_P,
+                                struct gred_sched_data **prealloc)
 {
        struct gred_sched *table = qdisc_priv(sch);
-       struct gred_sched_data *q;
+       struct gred_sched_data *q = table->tab[dp];
 
-       if (table->tab[dp] == NULL) {
-               table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
-               if (table->tab[dp] == NULL)
+       if (!q) {
+               table->tab[dp] = q = *prealloc;
+               *prealloc = NULL;
+               if (!q)
                        return -ENOMEM;
        }
 
-       q = table->tab[dp];
        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;
@@ -400,7 +402,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 
        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
-                     ctl->Scell_log, stab);
+                     ctl->Scell_log, stab, max_P);
 
        return 0;
 }
@@ -409,6 +411,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
+       [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -418,6 +421,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;
+       u32 max_P;
+       struct gred_sched_data *prealloc;
 
        if (opt == NULL)
                return -EINVAL;
@@ -433,6 +438,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_GRED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+
        err = -EINVAL;
        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);
@@ -455,9 +462,10 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
                        prio = ctl->prio;
        }
 
+       prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
        sch_tree_lock(sch);
 
-       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
        if (err < 0)
                goto errout_locked;
 
@@ -471,6 +479,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
 
 errout_locked:
        sch_tree_unlock(sch);
+       kfree(prealloc);
 errout:
        return err;
 }
@@ -498,6 +507,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *opts = NULL;
        int i;
+       u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
@@ -509,6 +519,14 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+
+       for (i = 0; i < MAX_DPs; i++) {
+               struct gred_sched_data *q = table->tab[i];
+
+               max_p[i] = q ? q->parms.max_P : 0;
+       }
+       NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;
index 6488e64256521f69512ee3be26589dd265e3f9bb..9bdca2e011e9122de321353037d4ed2891008601 100644 (file)
@@ -1368,6 +1368,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct tc_hfsc_stats xstats;
 
        cl->qstats.qlen = cl->qdisc->q.qlen;
+       cl->qstats.backlog = cl->qdisc->qstats.backlog;
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1561,6 +1562,15 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
+       struct hfsc_class *cl;
+       struct hlist_node *n;
+       unsigned int i;
+
+       sch->qstats.backlog = 0;
+       for (i = 0; i < q->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+                       sch->qstats.backlog += cl->qdisc->qstats.backlog;
+       }
 
        qopt.defcls = q->defcls;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
index f88256cbacbfe4b89c202d591514fb28feaac11b..28de43092330abc125423d5328babc709b1f986f 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index edc1950e0e7722d77e8b67a2e298bb4d1038e479..49131d7a7446eafb22c121ce987f0fd7e4e233a5 100644 (file)
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), curband))) {
                        qdisc = q->queues[curband];
                        skb = qdisc->ops->peek(qdisc);
                        if (skb)
index eb3b9a86c6ed93d502a4629f241a0d2496b01385..06a5cebad3426b67df67c41f39d2275ae8163102 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 */
 
 struct netem_sched_data {
+       /* internal t(ime)fifo qdisc uses sch->q and sch->limit */
+
+       /* optional qdisc for classful handling (NULL at netem init) */
        struct Qdisc    *qdisc;
+
        struct qdisc_watchdog watchdog;
 
        psched_tdiff_t latency;
@@ -79,6 +84,11 @@ struct netem_sched_data {
        u32 duplicate;
        u32 reorder;
        u32 corrupt;
+       u32 rate;
+       s32 packet_overhead;
+       u32 cell_size;
+       u32 cell_size_reciprocal;
+       s32 cell_overhead;
 
        struct crndstate {
                u32 last;
@@ -111,7 +121,9 @@ struct netem_sched_data {
 
 };
 
-/* Time stamp put into socket buffer control block */
+/* Time stamp put into socket buffer control block
+ * Only valid when skbs are in our internal t(ime)fifo queue.
+ */
 struct netem_skb_cb {
        psched_time_t   time_to_send;
 };
@@ -298,6 +310,51 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
        return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
+static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+{
+       u64 ticks;
+
+       len += q->packet_overhead;
+
+       if (q->cell_size) {
+               u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
+
+               if (len > cells * q->cell_size) /* extra cell needed for remainder */
+                       cells++;
+               len = cells * (q->cell_size + q->cell_overhead);
+       }
+
+       ticks = (u64)len * NSEC_PER_SEC;
+
+       do_div(ticks, q->rate);
+       return PSCHED_NS2TICKS(ticks);
+}
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+       struct sk_buff_head *list = &sch->q;
+       psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
+       struct sk_buff *skb;
+
+       if (likely(skb_queue_len(list) < sch->limit)) {
+               skb = skb_peek_tail(list);
+               /* Optimize for add at tail */
+               if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+                       return qdisc_enqueue_tail(nskb, sch);
+
+               skb_queue_reverse_walk(list, skb) {
+                       if (tnext >= netem_skb_cb(skb)->time_to_send)
+                               break;
+               }
+
+               __skb_queue_after(list, skb, nskb);
+               sch->qstats.backlog += qdisc_pkt_len(nskb);
+               return NET_XMIT_SUCCESS;
+       }
+
+       return qdisc_reshape_fail(nskb, sch);
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -371,9 +428,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                                  &q->delay_cor, q->delay_dist);
 
                now = psched_get_time();
+
+               if (q->rate) {
+                       struct sk_buff_head *list = &sch->q;
+
+                       delay += packet_len_2_sched_time(skb->len, q);
+
+                       if (!skb_queue_empty(list)) {
+                               /*
+                                * Last packet in queue is reference point (now).
+                                * First packet in queue is already in flight,
+                                * calculate this time bonus and subtract
+                                * from delay.
+                                */
+                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
+                       }
+               }
+
                cb->time_to_send = now + delay;
                ++q->counter;
-               ret = qdisc_enqueue(skb, q->qdisc);
+               ret = tfifo_enqueue(skb, sch);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
@@ -382,7 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                cb->time_to_send = psched_get_time();
                q->counter = 0;
 
-               __skb_queue_head(&q->qdisc->q, skb);
+               __skb_queue_head(&sch->q, skb);
                q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
                q->qdisc->qstats.requeues++;
                ret = NET_XMIT_SUCCESS;
@@ -395,19 +470,20 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                }
        }
 
-       sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }
 
 static unsigned int netem_drop(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned int len = 0;
+       unsigned int len;
 
-       if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
-               sch->q.qlen--;
+       len = qdisc_queue_drop(sch);
+       if (!len && q->qdisc && q->qdisc->ops->drop)
+           len = q->qdisc->ops->drop(q->qdisc);
+       if (len)
                sch->qstats.drops++;
-       }
+
        return len;
 }
 
@@ -419,16 +495,16 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
        if (qdisc_is_throttled(sch))
                return NULL;
 
-       skb = q->qdisc->ops->peek(q->qdisc);
+tfifo_dequeue:
+       skb = qdisc_peek_head(sch);
        if (skb) {
                const struct netem_skb_cb *cb = netem_skb_cb(skb);
-               psched_time_t now = psched_get_time();
 
                /* if more time remaining? */
-               if (cb->time_to_send <= now) {
-                       skb = qdisc_dequeue_peeked(q->qdisc);
+               if (cb->time_to_send <= psched_get_time()) {
+                       skb = qdisc_dequeue_tail(sch);
                        if (unlikely(!skb))
-                               return NULL;
+                               goto qdisc_dequeue;
 
 #ifdef CONFIG_NET_CLS_ACT
                        /*
@@ -439,15 +515,37 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                                skb->tstamp.tv64 = 0;
 #endif
 
-                       sch->q.qlen--;
+                       if (q->qdisc) {
+                               int err = qdisc_enqueue(skb, q->qdisc);
+
+                               if (unlikely(err != NET_XMIT_SUCCESS)) {
+                                       if (net_xmit_drop_count(err)) {
+                                               sch->qstats.drops++;
+                                               qdisc_tree_decrease_qlen(sch, 1);
+                                       }
+                               }
+                               goto tfifo_dequeue;
+                       }
+deliver:
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }
 
+               if (q->qdisc) {
+                       skb = q->qdisc->ops->dequeue(q->qdisc);
+                       if (skb)
+                               goto deliver;
+               }
                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }
 
+qdisc_dequeue:
+       if (q->qdisc) {
+               skb = q->qdisc->ops->dequeue(q->qdisc);
+               if (skb)
+                       goto deliver;
+       }
        return NULL;
 }
 
@@ -455,8 +553,9 @@ static void netem_reset(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
-       qdisc_reset(q->qdisc);
-       sch->q.qlen = 0;
+       qdisc_reset_queue(sch);
+       if (q->qdisc)
+               qdisc_reset(q->qdisc);
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -488,7 +587,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
                return -EINVAL;
 
        s = sizeof(struct disttable) + n * sizeof(s16);
-       d = kmalloc(s, GFP_KERNEL);
+       d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
        if (!d)
                d = vmalloc(s);
        if (!d)
@@ -501,9 +600,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       dist_free(q->delay_dist);
-       q->delay_dist = d;
+       swap(q->delay_dist, d);
        spin_unlock_bh(root_lock);
+
+       dist_free(d);
        return 0;
 }
 
@@ -535,6 +635,19 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
        init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       const struct tc_netem_rate *r = nla_data(attr);
+
+       q->rate = r->rate;
+       q->packet_overhead = r->packet_overhead;
+       q->cell_size = r->cell_size;
+       if (q->cell_size)
+               q->cell_size_reciprocal = reciprocal_value(q->cell_size);
+       q->cell_overhead = r->cell_overhead;
+}
+
 static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -548,7 +661,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
                case NETEM_LOSS_GI: {
                        const struct tc_netem_gimodel *gi = nla_data(la);
 
-                       if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+                       if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
                                pr_info("netem: incorrect gi model size\n");
                                return -EINVAL;
                        }
@@ -567,8 +680,8 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
                case NETEM_LOSS_GE: {
                        const struct tc_netem_gemodel *ge = nla_data(la);
 
-                       if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
-                               pr_info("netem: incorrect gi model size\n");
+                       if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
+                               pr_info("netem: incorrect ge model size\n");
                                return -EINVAL;
                        }
 
@@ -594,6 +707,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]        = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER]     = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT]     = { .len = sizeof(struct tc_netem_corrupt) },
+       [TCA_NETEM_RATE]        = { .len = sizeof(struct tc_netem_rate) },
        [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
 };
 
@@ -631,11 +745,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (ret < 0)
                return ret;
 
-       ret = fifo_set_limit(q->qdisc, qopt->limit);
-       if (ret) {
-               pr_info("netem: can't set fifo limit\n");
-               return ret;
-       }
+       sch->limit = qopt->limit;
 
        q->latency = qopt->latency;
        q->jitter = qopt->jitter;
@@ -666,6 +776,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_NETEM_CORRUPT])
                get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
+       if (tb[TCA_NETEM_RATE])
+               get_rate(sch, tb[TCA_NETEM_RATE]);
+
        q->loss_model = CLG_RANDOM;
        if (tb[TCA_NETEM_LOSS])
                ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -673,88 +786,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        return ret;
 }
 
-/*
- * Special case version of FIFO queue for use by netem.
- * It queues in order based on timestamps in skb's
- */
-struct fifo_sched_data {
-       u32 limit;
-       psched_time_t oldest;
-};
-
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
-{
-       struct fifo_sched_data *q = qdisc_priv(sch);
-       struct sk_buff_head *list = &sch->q;
-       psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-       struct sk_buff *skb;
-
-       if (likely(skb_queue_len(list) < q->limit)) {
-               /* Optimize for add at tail */
-               if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
-                       q->oldest = tnext;
-                       return qdisc_enqueue_tail(nskb, sch);
-               }
-
-               skb_queue_reverse_walk(list, skb) {
-                       const struct netem_skb_cb *cb = netem_skb_cb(skb);
-
-                       if (tnext >= cb->time_to_send)
-                               break;
-               }
-
-               __skb_queue_after(list, skb, nskb);
-
-               sch->qstats.backlog += qdisc_pkt_len(nskb);
-
-               return NET_XMIT_SUCCESS;
-       }
-
-       return qdisc_reshape_fail(nskb, sch);
-}
-
-static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
-{
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (opt) {
-               struct tc_fifo_qopt *ctl = nla_data(opt);
-               if (nla_len(opt) < sizeof(*ctl))
-                       return -EINVAL;
-
-               q->limit = ctl->limit;
-       } else
-               q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
-
-       q->oldest = PSCHED_PASTPERFECT;
-       return 0;
-}
-
-static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
-       struct fifo_sched_data *q = qdisc_priv(sch);
-       struct tc_fifo_qopt opt = { .limit = q->limit };
-
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
-       return skb->len;
-
-nla_put_failure:
-       return -1;
-}
-
-static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
-       .id             =       "tfifo",
-       .priv_size      =       sizeof(struct fifo_sched_data),
-       .enqueue        =       tfifo_enqueue,
-       .dequeue        =       qdisc_dequeue_head,
-       .peek           =       qdisc_peek_head,
-       .drop           =       qdisc_queue_drop,
-       .init           =       tfifo_init,
-       .reset          =       qdisc_reset_queue,
-       .change         =       tfifo_init,
-       .dump           =       tfifo_dump,
-};
-
 static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -766,18 +797,9 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
        qdisc_watchdog_init(&q->watchdog, sch);
 
        q->loss_model = CLG_RANDOM;
-       q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
-                                    TC_H_MAKE(sch->handle, 1));
-       if (!q->qdisc) {
-               pr_notice("netem: qdisc create tfifo qdisc failed\n");
-               return -ENOMEM;
-       }
-
        ret = netem_change(sch, opt);
-       if (ret) {
+       if (ret)
                pr_info("netem: change failed\n");
-               qdisc_destroy(q->qdisc);
-       }
        return ret;
 }
 
@@ -786,7 +808,8 @@ static void netem_destroy(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
 
        qdisc_watchdog_cancel(&q->watchdog);
-       qdisc_destroy(q->qdisc);
+       if (q->qdisc)
+               qdisc_destroy(q->qdisc);
        dist_free(q->delay_dist);
 }
 
@@ -846,6 +869,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
        struct tc_netem_corrupt corrupt;
+       struct tc_netem_rate rate;
 
        qopt.latency = q->latency;
        qopt.jitter = q->jitter;
@@ -868,6 +892,12 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
+       rate.rate = q->rate;
+       rate.packet_overhead = q->packet_overhead;
+       rate.cell_size = q->cell_size;
+       rate.cell_overhead = q->cell_overhead;
+       NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+
        if (dump_loss_model(q, skb) != 0)
                goto nla_put_failure;
 
@@ -883,7 +913,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
-       if (cl != 1)    /* only one class */
+       if (cl != 1 || !q->qdisc)       /* only one class */
                return -ENOENT;
 
        tcm->tcm_handle |= TC_H_MIN(1);
@@ -897,14 +927,13 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
-       if (new == NULL)
-               new = &noop_qdisc;
-
        sch_tree_lock(sch);
        *old = q->qdisc;
        q->qdisc = new;
-       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-       qdisc_reset(*old);
+       if (*old) {
+               qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+               qdisc_reset(*old);
+       }
        sch_tree_unlock(sch);
 
        return 0;
index 103343408593589e8f2343987f3f4b44afca5edb..e68cb440756a4c33ce1108d464ea51b1c0b95631 100644 (file)
@@ -211,6 +211,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        struct nlattr *tb[TCA_QFQ_MAX + 1];
        u32 weight, lmax, inv_w;
        int i, err;
+       int delta_w;
 
        if (tca[TCA_OPTIONS] == NULL) {
                pr_notice("qfq: no options\n");
@@ -232,9 +233,10 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
        inv_w = ONE_FP / weight;
        weight = ONE_FP / inv_w;
-       if (q->wsum + weight > QFQ_MAX_WSUM) {
+       delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
+       if (q->wsum + delta_w > QFQ_MAX_WSUM) {
                pr_notice("qfq: total weight out of range (%u + %u)\n",
-                         weight, q->wsum);
+                         delta_w, q->wsum);
                return -EINVAL;
        }
 
@@ -256,13 +258,12 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                return err;
                }
 
-               sch_tree_lock(sch);
-               if (tb[TCA_QFQ_WEIGHT]) {
-                       q->wsum = weight - ONE_FP / cl->inv_w;
+               if (inv_w != cl->inv_w) {
+                       sch_tree_lock(sch);
+                       q->wsum += delta_w;
                        cl->inv_w = inv_w;
+                       sch_tree_unlock(sch);
                }
-               sch_tree_unlock(sch);
-
                return 0;
        }
 
@@ -277,7 +278,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        i = qfq_calc_index(cl->inv_w, cl->lmax);
 
        cl->grp = &q->groups[i];
-       q->wsum += weight;
 
        cl->qdisc = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, classid);
@@ -294,6 +294,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                        return err;
                }
        }
+       q->wsum += weight;
 
        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -817,11 +818,11 @@ skip_unblock:
 static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 {
        unsigned long mask;
-       uint32_t limit, roundedF;
+       u64 limit, roundedF;
        int slot_shift = cl->grp->slot_shift;
 
        roundedF = qfq_round_down(cl->F, slot_shift);
-       limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+       limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
        if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
                /* timestamp was stale */
index 6649463da1b68e6e59e6709c7e1d0b527a60ab63..ce2256a17d7e1e8aa509a24f914ea69d404dce7c 100644 (file)
@@ -39,6 +39,7 @@
 struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
+       struct timer_list       adapt_timer;
        struct red_parms        parms;
        struct red_stats        stats;
        struct Qdisc            *qdisc;
@@ -161,12 +162,15 @@ static void red_reset(struct Qdisc *sch)
 static void red_destroy(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
+
+       del_timer_sync(&q->adapt_timer);
        qdisc_destroy(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
+       [TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt)
@@ -176,6 +180,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        struct tc_red_qopt *ctl;
        struct Qdisc *child = NULL;
        int err;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -188,6 +193,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_RED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_RED_PARMS]);
 
        if (ctl->limit > 0) {
@@ -206,21 +213,39 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
-                                ctl->Plog, ctl->Scell_log,
-                                nla_data(tb[TCA_RED_STAB]));
+                     ctl->Plog, ctl->Scell_log,
+                     nla_data(tb[TCA_RED_STAB]),
+                     max_P);
 
-       if (skb_queue_empty(&sch->q))
-               red_end_of_idle_period(&q->parms);
+       del_timer(&q->adapt_timer);
+       if (ctl->flags & TC_RED_ADAPTATIVE)
+               mod_timer(&q->adapt_timer, jiffies + HZ/2);
+
+       if (!q->qdisc->q.qlen)
+               red_start_of_idle_period(&q->parms);
 
        sch_tree_unlock(sch);
        return 0;
 }
 
+static inline void red_adaptative_timer(unsigned long arg)
+{
+       struct Qdisc *sch = (struct Qdisc *)arg;
+       struct red_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+       spin_lock(root_lock);
+       red_adaptative_algo(&q->parms);
+       mod_timer(&q->adapt_timer, jiffies + HZ/2);
+       spin_unlock(root_lock);
+}
+
 static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
        q->qdisc = &noop_qdisc;
+       setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
        return red_change(sch, opt);
 }
 
@@ -243,6 +268,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index e83c272c0325530edbedb86ac0c16c162e49fff0..96e42cae4c7a4580e753c41b9c4f1e5c29cabbcd 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
+#include <net/flow_keys.h>
 
 /*
  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -286,6 +287,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        u32 minqlen = ~0;
        u32 r, slot, salt, sfbhash;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+       struct flow_keys keys;
 
        if (unlikely(sch->q.qlen >= q->limit)) {
                sch->qstats.overlimits++;
@@ -309,13 +311,19 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, q, &ret, &salt))
                        goto other_drop;
+               keys.src = salt;
+               keys.dst = 0;
+               keys.ports = 0;
        } else {
-               salt = skb_get_rxhash(skb);
+               skb_flow_dissect(skb, &keys);
        }
 
        slot = q->slot;
 
-       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+       sfbhash = jhash_3words((__force u32)keys.dst,
+                              (__force u32)keys.src,
+                              (__force u32)keys.ports,
+                              q->bins[slot].perturbation);
        if (!sfbhash)
                sfbhash = 1;
        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -347,7 +355,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (unlikely(p_min >= SFB_MAX_PROB)) {
                /* Inelastic flow */
                if (q->double_buffering) {
-                       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+                       sfbhash = jhash_3words((__force u32)keys.dst,
+                                              (__force u32)keys.src,
+                                              (__force u32)keys.ports,
+                                              q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index 4f5510e2bd6f659e85f726a239bb902d96e3c35d..843018154a5c9eae03b3d6bba577d8960fd88890 100644 (file)
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/ipv6.h>
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/flow_keys.h>
 
 
 /*     Stochastic Fairness Queuing algorithm.
@@ -137,61 +136,31 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_SLOTS];
 }
 
-static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+/*
+ * In order to be able to quickly rehash our queue when timer changes
+ * q->perturbation, we store flow_keys in skb->cb[]
+ */
+struct sfq_skb_cb {
+       struct flow_keys        keys;
+};
+
+static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-       return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
+       return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
-static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(const struct sfq_sched_data *q,
+                            const struct sk_buff *skb)
 {
-       u32 h, h2;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-       {
-               const struct iphdr *iph;
-               int poff;
-
-               if (!pskb_network_may_pull(skb, sizeof(*iph)))
-                       goto err;
-               iph = ip_hdr(skb);
-               h = (__force u32)iph->daddr;
-               h2 = (__force u32)iph->saddr ^ iph->protocol;
-               if (ip_is_fragment(iph))
-                       break;
-               poff = proto_ports_offset(iph->protocol);
-               if (poff >= 0 &&
-                   pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
-                       iph = ip_hdr(skb);
-                       h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
-               }
-               break;
-       }
-       case htons(ETH_P_IPV6):
-       {
-               const struct ipv6hdr *iph;
-               int poff;
-
-               if (!pskb_network_may_pull(skb, sizeof(*iph)))
-                       goto err;
-               iph = ipv6_hdr(skb);
-               h = (__force u32)iph->daddr.s6_addr32[3];
-               h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-               poff = proto_ports_offset(iph->nexthdr);
-               if (poff >= 0 &&
-                   pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
-                       iph = ipv6_hdr(skb);
-                       h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
-               }
-               break;
-       }
-       default:
-err:
-               h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
-               h2 = (unsigned long)skb->sk;
-       }
+       const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
+       unsigned int hash;
 
-       return sfq_fold_hash(q, h, h2);
+       hash = jhash_3words((__force u32)keys->dst,
+                           (__force u32)keys->src ^ keys->ip_proto,
+                           (__force u32)keys->ports, q->perturbation);
+       return hash & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -206,8 +175,10 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
            TC_H_MIN(skb->priority) <= q->divisor)
                return TC_H_MIN(skb->priority);
 
-       if (!q->filter_list)
+       if (!q->filter_list) {
+               skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
                return sfq_hash(q, skb) + 1;
+       }
 
        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
@@ -395,11 +366,11 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (slot->qlen == 1) {          /* The flow is new */
                if (q->tail == NULL) {  /* It is the first flow */
                        slot->next = x;
+                       q->tail = slot;
                } else {
                        slot->next = q->tail->next;
                        q->tail->next = x;
                }
-               q->tail = slot;
                slot->allot = q->scaled_quantum;
        }
        if (++sch->q.qlen <= q->limit)
@@ -468,12 +439,71 @@ sfq_reset(struct Qdisc *sch)
                kfree_skb(skb);
 }
 
+/*
+ * When q->perturbation is changed, we rehash all queued skbs
+ * to avoid OOO (Out Of Order) effects.
+ * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
+ * counters.
+ */
+static void sfq_rehash(struct sfq_sched_data *q)
+{
+       struct sk_buff *skb;
+       int i;
+       struct sfq_slot *slot;
+       struct sk_buff_head list;
+
+       __skb_queue_head_init(&list);
+
+       for (i = 0; i < SFQ_SLOTS; i++) {
+               slot = &q->slots[i];
+               if (!slot->qlen)
+                       continue;
+               while (slot->qlen) {
+                       skb = slot_dequeue_head(slot);
+                       sfq_dec(q, i);
+                       __skb_queue_tail(&list, skb);
+               }
+               q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+       }
+       q->tail = NULL;
+
+       while ((skb = __skb_dequeue(&list)) != NULL) {
+               unsigned int hash = sfq_hash(q, skb);
+               sfq_index x = q->ht[hash];
+
+               slot = &q->slots[x];
+               if (x == SFQ_EMPTY_SLOT) {
+                       x = q->dep[0].next; /* get a free slot */
+                       q->ht[hash] = x;
+                       slot = &q->slots[x];
+                       slot->hash = hash;
+               }
+               slot_queue_add(slot, skb);
+               sfq_inc(q, x);
+               if (slot->qlen == 1) {          /* The flow is new */
+                       if (q->tail == NULL) {  /* It is the first flow */
+                               slot->next = x;
+                       } else {
+                               slot->next = q->tail->next;
+                               q->tail->next = x;
+                       }
+                       q->tail = slot;
+                       slot->allot = q->scaled_quantum;
+               }
+       }
+}
+
 static void sfq_perturbation(unsigned long arg)
 {
        struct Qdisc *sch = (struct Qdisc *)arg;
        struct sfq_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
+       spin_lock(root_lock);
        q->perturbation = net_random();
+       if (!q->filter_list && q->tail)
+               sfq_rehash(q);
+       spin_unlock(root_lock);
 
        if (q->perturb_period)
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
@@ -514,10 +544,38 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
+static void *sfq_alloc(size_t sz)
+{
+       void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+       if (!ptr)
+               ptr = vmalloc(sz);
+       return ptr;
+}
+
+static void sfq_free(void *addr)
+{
+       if (addr) {
+               if (is_vmalloc_addr(addr))
+                       vfree(addr);
+               else
+                       kfree(addr);
+       }
+}
+
+static void sfq_destroy(struct Qdisc *sch)
+{
+       struct sfq_sched_data *q = qdisc_priv(sch);
+
+       tcf_destroy_chain(&q->filter_list);
+       q->perturb_period = 0;
+       del_timer_sync(&q->perturb_timer);
+       sfq_free(q->ht);
+}
+
 static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
-       size_t sz;
        int i;
 
        q->perturb_timer.function = sfq_perturbation;
@@ -533,23 +591,22 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->cur_depth = 0;
        q->tail = NULL;
        q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
-       if (opt == NULL) {
-               q->quantum = psched_mtu(qdisc_dev(sch));
-               q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
-               q->perturb_period = 0;
-               q->perturbation = net_random();
-       } else {
+       q->quantum = psched_mtu(qdisc_dev(sch));
+       q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+       q->perturb_period = 0;
+       q->perturbation = net_random();
+
+       if (opt) {
                int err = sfq_change(sch, opt);
                if (err)
                        return err;
        }
 
-       sz = sizeof(q->ht[0]) * q->divisor;
-       q->ht = kmalloc(sz, GFP_KERNEL);
-       if (!q->ht && sz > PAGE_SIZE)
-               q->ht = vmalloc(sz);
-       if (!q->ht)
+       q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
+       if (!q->ht) {
+               sfq_destroy(sch);
                return -ENOMEM;
+       }
        for (i = 0; i < q->divisor; i++)
                q->ht[i] = SFQ_EMPTY_SLOT;
 
@@ -564,19 +621,6 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static void sfq_destroy(struct Qdisc *sch)
-{
-       struct sfq_sched_data *q = qdisc_priv(sch);
-
-       tcf_destroy_chain(&q->filter_list);
-       q->perturb_period = 0;
-       del_timer_sync(&q->perturb_timer);
-       if (is_vmalloc_addr(q->ht))
-               vfree(q->ht);
-       else
-               kfree(q->ht);
-}
-
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
index 1dcfb5223a861fec4e5e4e41200b86f070363baf..b8e156319d7bc48c261ccad7110bbeb84ba44856 100644 (file)
@@ -346,6 +346,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct nlattr *nest;
        struct tc_tbf_qopt opt;
 
+       sch->qstats.backlog = q->qdisc->qstats.backlog;
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
index a3b7120fcc74c45cb642a7edc3813bde893240f1..45326599fda37786e7ac2104e77bbd1bfa2b5083 100644 (file)
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 
 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+              struct net_device *dev, struct netdev_queue *txq,
+              struct neighbour *mn)
 {
-       struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-       struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-       struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+       struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;
 
        if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }
 
 static inline int teql_resolve(struct sk_buff *skb,
-                              struct sk_buff *skb_res, struct net_device *dev)
+                              struct sk_buff *skb_res,
+                              struct net_device *dev,
+                              struct netdev_queue *txq)
 {
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *mn;
+       int res;
+
        if (txq->qdisc == &noop_qdisc)
                return -ENODEV;
 
-       if (dev->header_ops == NULL ||
-           skb_dst(skb) == NULL ||
-           dst_get_neighbour(skb_dst(skb)) == NULL)
+       if (!dev->header_ops || !dst)
                return 0;
-       return __teql_resolve(skb, skb_res, dev);
+
+       rcu_read_lock();
+       mn = dst_get_neighbour_noref(dst);
+       res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+       rcu_read_unlock();
+
+       return res;
 }
 
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -301,18 +310,18 @@ restart:
 
                if (slave_txq->qdisc_sleeping != q)
                        continue;
-               if (__netif_subqueue_stopped(slave, subq) ||
+               if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                    !netif_running(slave)) {
                        busy = 1;
                        continue;
                }
 
-               switch (teql_resolve(skb, skb_res, slave)) {
+               switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                case 0:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
 
-                               if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+                               if (!netif_xmit_frozen_or_stopped(slave_txq) &&
                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                        txq_trans_update(slave_txq);
                                        __netif_tx_unlock(slave_txq);
@@ -324,7 +333,7 @@ restart:
                                }
                                __netif_tx_unlock(slave_txq);
                        }
-                       if (netif_queue_stopped(dev))
+                       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
                                busy = 1;
                        break;
                case 1:
index 152b5b3c3fffa978ae0d725d182f9aa54c8a9cf6..acd2edbc073ebf4ad334a4b0a16ff7b45413fac5 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
index 865e68fef21c326c631183c7c7d5ded4ad842647..bf812048cf6f7a244c547e0cd31a731351abfab3 100644 (file)
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
        struct sctp_auth_bytes *key;
 
        /* Verify that we are not going to overflow INT_MAX */
-       if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+       if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
                return NULL;
 
        /* Allocate the shared key */
index b7692aab6e9c4f82fbfa90103b67ad964607e6b4..80f71af713841ad3a10567b30968d5dc61a91d05 100644 (file)
@@ -105,7 +105,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 struct sctp_input_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index 810427833bcdc9096e0105bfdb1c731fd71baf31..91f479121c55b830591c1ef338cbe24c154f2773 100644 (file)
@@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
-                       ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
+                       addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
                        spin_lock_bh(&sctp_local_addr_lock);
@@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        /* Fill in the dest address from the route entry passed with the skb
         * and the source address from the transport.
         */
-       ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
-       ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+       fl6.daddr = transport->ipaddr.v6.sin6_addr;
+       fl6.saddr = transport->saddr.v6.sin6_addr;
 
        fl6.flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6.flowlabel);
@@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-               ipv6_addr_copy(&fl6.daddr, rt0->addr);
+               fl6.daddr = *rt0->addr;
        }
 
        SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
@@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        sctp_scope_t scope;
 
        memset(fl6, 0, sizeof(struct flowi6));
-       ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
+       fl6->daddr = daddr->v6.sin6_addr;
        fl6->fl6_dport = daddr->v6.sin6_port;
        fl6->flowi6_proto = IPPROTO_SCTP;
        if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                fl6->fl6_sport = htons(asoc->base.bind_addr.port);
 
        if (saddr) {
-               ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
+               fl6->saddr = saddr->v6.sin6_addr;
                fl6->fl6_sport = saddr->v6.sin6_port;
                SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
        }
@@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        }
        rcu_read_unlock();
        if (baddr) {
-               ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+               fl6->saddr = baddr->v6.sin6_addr;
                fl6->fl6_sport = baddr->v6.sin6_port;
                dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
        }
@@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
 
        if (t->dst) {
                saddr->v6.sin6_family = AF_INET6;
-               ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
+               saddr->v6.sin6_addr = fl6->saddr;
        }
 }
 
@@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
-                       ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
+                       addr->a.v6.sin6_addr = ifp->addr;
                        addr->a.v6.sin6_scope_id = dev->ifindex;
                        addr->valid = 1;
                        INIT_LIST_HEAD(&addr->list);
@@ -416,7 +416,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
                             int is_saddr)
 {
-       void *from;
        __be16 *port;
        struct sctphdr *sh;
 
@@ -428,12 +427,11 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
        sh = sctp_hdr(skb);
        if (is_saddr) {
                *port  = sh->source;
-               from = &ipv6_hdr(skb)->saddr;
+               addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
        } else {
                *port = sh->dest;
-               from = &ipv6_hdr(skb)->daddr;
+               addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
        }
-       ipv6_addr_copy(&addr->v6.sin6_addr, from);
 }
 
 /* Initialize an sctp_addr from a socket. */
@@ -441,7 +439,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 {
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = 0;
-       ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
+       addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -454,7 +452,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
                inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
                        addr->v4.sin_addr.s_addr;
        } else {
-               ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
+               inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
        }
 }
 
@@ -467,7 +465,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
                inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
                inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        } else {
-               ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
+               inet6_sk(sk)->daddr = addr->v6.sin6_addr;
        }
 }
 
@@ -479,7 +477,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr,
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = port;
        addr->v6.sin6_flowinfo = 0; /* BUG */
-       ipv6_addr_copy(&addr->v6.sin6_addr, &param->v6.addr);
+       addr->v6.sin6_addr = param->v6.addr;
        addr->v6.sin6_scope_id = iif;
 }
 
@@ -493,7 +491,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
 
        param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
        param->v6.param_hdr.length = htons(length);
-       ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
+       param->v6.addr = addr->v6.sin6_addr;
 
        return length;
 }
@@ -504,7 +502,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
 {
        addr->sa.sa_family = AF_INET6;
        addr->v6.sin6_port = port;
-       ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
+       addr->v6.sin6_addr = *saddr;
 }
 
 /* Compare addresses exactly.
@@ -759,7 +757,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
                }
 
                sin6from = &asoc->peer.primary_addr.v6;
-               ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+               sin6->sin6_addr = sin6from->sin6_addr;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                        sin6->sin6_scope_id = sin6from->sin6_scope_id;
        }
@@ -787,7 +785,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
                }
 
                /* Otherwise, just copy the v6 address. */
-               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+               sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
                        struct sctp_ulpevent *ev = sctp_skb2event(skb);
                        sin6->sin6_scope_id = ev->iif;
index 08b3cead6503c62f91dc8e97d9b817de7a79ffb9..817174eb5f41a50147dddded99bf222e421001b6 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06028ffb1bea3acde6243ff9386d053616e..cfeb1d4a1ee6ca730595959946ced911b7442baa 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca5a173bba9057f9a09dc2ac6cf45f34bc9..5942d27b1444c71dc4c7af0c591ec3b1b181a92d 100644 (file)
@@ -637,7 +637,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                /* Now we send an ASCONF for each association */
                /* Note. we currently don't handle link local IPv6 addressees */
                if (addrw->a.sa.sa_family == AF_INET6) {
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index 0121e0ab035167d468c2976f17f210b2cb98a083..a85eeeb55dd0022e009895c53a6d8593929b7691 100644 (file)
@@ -3400,8 +3400,10 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
                asconf_len -= length;
        }
 
-       if (no_err && asoc->src_out_of_asoc_ok)
+       if (no_err && asoc->src_out_of_asoc_ok) {
                asoc->src_out_of_asoc_ok = 0;
+               sctp_transport_immediate_rtx(asoc->peer.primary_path);
+       }
 
        /* Free the cached last sent asconf chunk. */
        list_del_init(&asconf->transmitted_list);
index 76388b083f283a2e2c62ab3f605ee8a12469454f..1ff51c9d18d5d5ca925da9bd24544fe46014ad7b 100644 (file)
@@ -666,6 +666,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
                                  struct sctp_chunk *chunk)
 {
        sctp_sender_hb_info_t *hbinfo;
+       int was_unconfirmed = 0;
 
        /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
         * HEARTBEAT should clear the error counter of the destination
@@ -692,9 +693,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* Mark the destination transport address as active if it is not so
         * marked.
         */
-       if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
+       if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+               was_unconfirmed = 1;
                sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
                                             SCTP_HEARTBEAT_SUCCESS);
+       }
 
        /* The receiver of the HEARTBEAT ACK should also perform an
         * RTT measurement for that destination transport address
@@ -712,6 +715,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* Update the heartbeat timer.  */
        if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
                sctp_transport_hold(t);
+
+       if (was_unconfirmed && asoc->peer.transport_count == 1)
+               sctp_transport_immediate_rtx(t);
 }
 
 
index 13bf5fcdbff1b9f80d2d0c6288ab98762d499e74..408ebd0e73305b56b76dc139af29611742c8e82c 100644 (file)
@@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
                                struct sockaddr_in6 *sin6;
 
                                sin6 = (struct sockaddr_in6 *)addrs;
-                               ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+                               asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
                        }
                        SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
                            " at %p\n", asoc, asoc->asconf_addr_del_pending,
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
@@ -6841,7 +6839,7 @@ struct proto sctp_prot = {
        .sockets_allocated = &sctp_sockets_allocated,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 struct proto sctpv6_prot = {
        .name           = "SCTPv6",
@@ -6872,4 +6870,4 @@ struct proto sctpv6_prot = {
        .memory_allocated = &sctp_memory_allocated,
        .sockets_allocated = &sctp_sockets_allocated,
 };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
index 6b3952961b858369d8a63b908b0911357a9e28e7..60ffbd067ff75643ac3f5cc61e4ba20c2b8ef3b9 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
index 394c57ca2f54210e4060654fb72f1937aee3bc75..3889330b7b04c06f33338be44bbf4effcf518827 100644 (file)
@@ -641,3 +641,19 @@ void sctp_transport_reset(struct sctp_transport *t)
        t->cacc.next_tsn_at_change = 0;
        t->cacc.cacc_saw_newack = 0;
 }
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+       /* Stop pending T3_rtx_timer */
+       if (timer_pending(&t->T3_rtx_timer)) {
+               (void)del_timer(&t->T3_rtx_timer);
+               sctp_transport_put(t);
+       }
+       sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+       if (!timer_pending(&t->T3_rtx_timer)) {
+               if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+                       sctp_transport_hold(t);
+       }
+       return;
+}
index 425ef42704605e83c0f366fc1edb000d18eeb419..2cad581318feba43bb84727fab1c59bb190f3bfe 100644 (file)
@@ -551,6 +551,8 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
 
        sock_update_classid(sock->sk);
 
+       sock_update_netprioidx(sock->sk);
+
        si->sock = sock;
        si->scm = NULL;
        si->msg = msg;
@@ -2756,10 +2758,10 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
        case ETHTOOL_GRXRINGS:
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
+       case ETHTOOL_SRXCLSRLINS:
                convert_out = true;
                /* fall through */
        case ETHTOOL_SRXCLSRLDEL:
-       case ETHTOOL_SRXCLSRLINS:
                buf_size += sizeof(struct ethtool_rxnfc);
                convert_in = true;
                break;
index 67a655ee82a94060ef5e0760c3aec61e7bc52c9e..ee77742e0ed6d3dcc89401808028b0228a666aaf 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -91,7 +91,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return len;
 }
 
-#else  /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#else  /* !IS_ENABLED(CONFIG_IPV6) */
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -105,7 +105,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return 0;
 }
 
-#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
 
 static int rpc_ntop4(const struct sockaddr *sap,
                     char *buf, const size_t buflen)
@@ -155,7 +155,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
        return sizeof(struct sockaddr_in);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int rpc_parse_scope_id(const char *buf, const size_t buflen,
                              const char *delim, struct sockaddr_in6 *sin6)
 {
index d12ffa5458115e3912f8b7f22a337963d8d3213c..00a1a2acd587681adf4e904d9cfddbd05f0251e1 100644 (file)
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+       /* Initialize retry counters */
+       task->tk_garb_retry = 2;
+       task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
+
+       /* starting timestamp */
+       task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+       task->tk_timeouts = 0;
+       task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+       rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
+                       rpc_reset_task_statistics(task);
                }
        }
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);
 
-       /* Initialize retry counters */
-       task->tk_garb_retry = 2;
-       task->tk_cred_retry = 2;
-       task->tk_rebind_retry = 2;
-
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
 
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
 
-       /* starting timestamp */
-       task->tk_start = ktime_get();
+       rpc_init_task_statistics(task);
 
        dprintk("RPC:       new task initialized, procpid %u\n",
                                task_pid_nr(current));
index 6e038884ae0c1760178cf7647cc7ad3156331f5e..9d01d46b05f36785e0612786a2365be80a08dc0d 100644 (file)
@@ -826,7 +826,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
        return error;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /*
  * Register an "inet6" protocol family netid with the local
  * rpcbind daemon via an rpcbind v4 SET request.
@@ -872,7 +872,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
 
        return error;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Register a kernel RPC service via rpcbind version 4.
@@ -893,11 +893,11 @@ static int __svc_register(const char *progname,
                error = __svc_rpcb_register4(program, version,
                                                protocol, port);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                error = __svc_rpcb_register6(program, version,
                                                protocol, port);
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        }
 
        if (error < 0)
index 447cd0eb415c095fb58958b24e5353d17667c2d0..38649cfa4e81350aed397d3bdeb316556aaf57e6 100644 (file)
@@ -179,13 +179,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        struct sockaddr *sap;
        size_t len;
 
@@ -194,12 +194,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                sap = (struct sockaddr *)&sin;
                len = sizeof(sin);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                sap = (struct sockaddr *)&sin6;
                len = sizeof(sin6);
                break;
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }
index ce136323da8b1540f8618348b851c86d7b8a6369..01153ead1dbaf3d982e4d57bfacccce674b507a4 100644 (file)
@@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
        struct ip_map *item = container_of(citem, struct ip_map, h);
 
        strcpy(new->m_class, item->m_class);
-       ipv6_addr_copy(&new->m_addr, &item->m_addr);
+       new->m_addr = item->m_addr;
 }
 static void update(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -220,7 +220,7 @@ static int ip_map_parse(struct cache_detail *cd,
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                &sin6.sin6_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
@@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m,
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
-       ipv6_addr_copy(&addr, &im->m_addr);
+       addr = im->m_addr;
 
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
@@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
        struct cache_head *ch;
 
        strcpy(ip.m_class, class);
-       ipv6_addr_copy(&ip.m_addr, addr);
+       ip.m_addr = *addr;
        ch = sunrpc_cache_lookup(cd, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(*addr));
index 71bed1c1c77a168da2299a1a009fa27e83253b1e..4653286fcc9e685e1d1999c532325ce48870d4f8 100644 (file)
@@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
                        cmh->cmsg_level = SOL_IPV6;
                        cmh->cmsg_type = IPV6_PKTINFO;
                        pki->ipi6_ifindex = daddr->sin6_scope_id;
-                       ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
+                       pki->ipi6_addr = daddr->sin6_addr;
                        cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                }
                break;
@@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                return 0;
 
        daddr->sin6_family = AF_INET6;
-       ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+       daddr->sin6_addr = pki->ipi6_addr;
        daddr->sin6_scope_id = pki->ipi6_ifindex;
        return 1;
 }
index f4385e45a5fcb398086c6c8c5c8ab4daff7ee139..c64c0ef519b594320ff688f3881579d2926be21d 100644 (file)
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (xprt_dynamic_free_slot(xprt, req))
-               return;
-
-       memset(req, 0, sizeof(*req));   /* mark unused */
-
        spin_lock(&xprt->reserve_lock);
-       list_add(&req->rq_list, &xprt->free);
+       if (!xprt_dynamic_free_slot(xprt, req)) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               list_add(&req->rq_list, &xprt->free);
+       }
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
 }
index d7f97ef265904f0ddc0d691360f1e920dc63210b..55472c48825e6fd43c3357a2f58b398231a2767e 100644 (file)
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-       int ret = 0;
+       int ret = -EAGAIN;
 
        dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
        /* Don't race with disconnect */
        if (xprt_connected(xprt)) {
                if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
-                       ret = -EAGAIN;
                        /*
                         * Notify TCP that we're limited by the application
                         * window size
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
                int err;
                err = xs_init_anyaddr(args->dstaddr->sa_family,
                                        (struct sockaddr *)&new->srcaddr);
-               if (err != 0)
+               if (err != 0) {
+                       xprt_free(xprt);
                        return ERR_PTR(err);
+               }
        }
 
        return xprt;
index 28908f54459e2d4e0a95f5760eeaa179971658cc..8eb87b11d10050f7497498e4776064b39c216be9 100644 (file)
@@ -46,7 +46,7 @@
 #define BCLINK_WIN_DEFAULT 20          /* bcast link window size (default) */
 
 /**
- * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
  * @secondary: pointer to secondary bearer
  *
  * to be paired.
  */
 
-struct bcbearer_pair {
+struct tipc_bcbearer_pair {
        struct tipc_bearer *primary;
        struct tipc_bearer *secondary;
 };
 
 /**
- * struct bcbearer - bearer used by broadcast link
+ * struct tipc_bcbearer - bearer used by broadcast link
  * @bearer: (non-standard) broadcast bearer structure
  * @media: (non-standard) broadcast media structure
  * @bpairs: array of bearer pairs
@@ -74,38 +74,40 @@ struct bcbearer_pair {
  * prevented through use of the spinlock "bc_lock".
  */
 
-struct bcbearer {
+struct tipc_bcbearer {
        struct tipc_bearer bearer;
-       struct media media;
-       struct bcbearer_pair bpairs[MAX_BEARERS];
-       struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+       struct tipc_media media;
+       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
 };
 
 /**
- * struct bclink - link used for broadcast messages
+ * struct tipc_bclink - link used for broadcast messages
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
 
-struct bclink {
-       struct link link;
+struct tipc_bclink {
+       struct tipc_link link;
        struct tipc_node node;
+       struct tipc_node_map bcast_nodes;
        struct tipc_node *retransmit_to;
 };
 
+static struct tipc_bcbearer bcast_bearer;
+static struct tipc_bclink bcast_link;
 
-static struct bcbearer *bcbearer;
-static struct bclink *bclink;
-static struct link *bcl;
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer = &bcast_bearer;
+static struct tipc_bclink *bclink = &bcast_link;
+static struct tipc_link *bcl = &bcast_link.link;
 
-/* broadcast-capable node map */
-struct tipc_node_map tipc_bcast_nmap;
+static DEFINE_SPINLOCK(bc_lock);
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -113,11 +115,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);
 
-static u32 buf_seqno(struct sk_buff *buf)
-{
-       return msg_seqno(buf_msg(buf));
-}
-
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -133,6 +130,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
+void tipc_bclink_add_node(u32 addr)
+{
+       spin_lock_bh(&bc_lock);
+       tipc_nmap_add(&bclink->bcast_nodes, addr);
+       spin_unlock_bh(&bc_lock);
+}
+
+void tipc_bclink_remove_node(u32 addr)
+{
+       spin_lock_bh(&bc_lock);
+       tipc_nmap_remove(&bclink->bcast_nodes, addr);
+       spin_unlock_bh(&bc_lock);
+}
 
 static void bclink_set_last_sent(void)
 {
@@ -222,14 +232,36 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        struct sk_buff *next;
        unsigned int released = 0;
 
-       if (less_eq(acked, n_ptr->bclink.acked))
-               return;
-
        spin_lock_bh(&bc_lock);
 
-       /* Skip over packets that node has previously acknowledged */
-
+       /* Bail out if tx queue is empty (no clean up is required) */
        crs = bcl->first_out;
+       if (!crs)
+               goto exit;
+
+       /* Determine which messages need to be acknowledged */
+       if (acked == INVALID_LINK_SEQ) {
+               /*
+                * Contact with specified node has been lost, so need to
+                * acknowledge sent messages only (if other nodes still exist)
+                * or both sent and unsent messages (otherwise)
+                */
+               if (bclink->bcast_nodes.count)
+                       acked = bcl->fsm_msg_cnt;
+               else
+                       acked = bcl->next_out_no;
+       } else {
+               /*
+                * Bail out if specified sequence number does not correspond
+                * to a message that has been sent and not yet acknowledged
+                */
+               if (less(acked, buf_seqno(crs)) ||
+                   less(bcl->fsm_msg_cnt, acked) ||
+                   less_eq(acked, n_ptr->bclink.acked))
+                       goto exit;
+       }
+
+       /* Skip over packets that node has previously acknowledged */
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
                crs = crs->next;
 
@@ -237,7 +269,15 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
-               bcbuf_decr_acks(crs);
+
+               if (crs != bcl->next_out)
+                       bcbuf_decr_acks(crs);
+               else {
+                       bcbuf_set_acks(crs, 0);
+                       bcl->next_out = next;
+                       bclink_set_last_sent();
+               }
+
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
@@ -256,6 +296,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        }
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
+exit:
        spin_unlock_bh(&bc_lock);
 }
 
@@ -267,7 +308,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
 static void bclink_send_ack(struct tipc_node *n_ptr)
 {
-       struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+       struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
 
        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
@@ -402,13 +443,19 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
 
        spin_lock_bh(&bc_lock);
 
+       if (!bclink->bcast_nodes.count) {
+               res = msg_data_sz(buf_msg(buf));
+               buf_discard(buf);
+               goto exit;
+       }
+
        res = tipc_link_send_buf(bcl, buf);
-       if (likely(res > 0))
+       if (likely(res >= 0)) {
                bclink_set_last_sent();
-
-       bcl->stats.queue_sz_counts++;
-       bcl->stats.accu_queue_sz += bcl->out_queue_size;
-
+               bcl->stats.queue_sz_counts++;
+               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+       }
+exit:
        spin_unlock_bh(&bc_lock);
        return res;
 }
@@ -572,13 +619,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;
 
-               bcbuf_set_acks(buf, tipc_bcast_nmap.count);
+               bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
 
-               if (WARN_ON(!tipc_bcast_nmap.count)) {
+               if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        return 0;
                }
@@ -586,7 +633,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 
        /* Send buffer over bearers until all targets reached */
 
-       bcbearer->remains = tipc_bcast_nmap;
+       bcbearer->remains = bclink->bcast_nodes;
 
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -630,8 +677,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 
 void tipc_bcbearer_sort(void)
 {
-       struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
-       struct bcbearer_pair *bp_curr;
+       struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+       struct tipc_bcbearer_pair *bp_curr;
        int b_index;
        int pri;
 
@@ -752,25 +799,13 @@ int tipc_bclink_set_queue_limits(u32 limit)
        return 0;
 }
 
-int tipc_bclink_init(void)
+void tipc_bclink_init(void)
 {
-       bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
-       bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
-       if (!bcbearer || !bclink) {
-               warn("Broadcast link creation failed, no memory\n");
-               kfree(bcbearer);
-               bcbearer = NULL;
-               kfree(bclink);
-               bclink = NULL;
-               return -ENOMEM;
-       }
-
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");
 
-       bcl = &bclink->link;
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
@@ -780,22 +815,16 @@ int tipc_bclink_init(void)
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-
-       return 0;
 }
 
 void tipc_bclink_stop(void)
 {
        spin_lock_bh(&bc_lock);
-       if (bcbearer) {
-               tipc_link_stop(bcl);
-               bcl = NULL;
-               kfree(bclink);
-               bclink = NULL;
-               kfree(bcbearer);
-               bcbearer = NULL;
-       }
+       tipc_link_stop(bcl);
        spin_unlock_bh(&bc_lock);
+
+       memset(bclink, 0, sizeof(*bclink));
+       memset(bcbearer, 0, sizeof(*bcbearer));
 }
 
 
@@ -864,9 +893,9 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
  * tipc_port_list_add - add a port to a port list, ensuring no duplicates
  */
 
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
 {
-       struct port_list *item = pl_ptr;
+       struct tipc_port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;
@@ -898,10 +927,10 @@ void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
  *
  */
 
-void tipc_port_list_free(struct port_list *pl_ptr)
+void tipc_port_list_free(struct tipc_port_list *pl_ptr)
 {
-       struct port_list *item;
-       struct port_list *next;
+       struct tipc_port_list *item;
+       struct tipc_port_list *next;
 
        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
index 06740da5ae614b30e10f3c3d938cc2ced8283b34..b009666c60b0e5464ac1bdaaab031289fd8b7597 100644 (file)
@@ -51,20 +51,18 @@ struct tipc_node_map {
        u32 map[MAX_NODES / WSIZE];
 };
 
-extern struct tipc_node_map tipc_bcast_nmap;
-
 #define PLSIZE 32
 
 /**
- * struct port_list - set of node local destination ports
+ * struct tipc_port_list - set of node local destination ports
  * @count: # of ports in set (only valid for first entry in list)
  * @next: pointer to next entry in list
  * @ports: array of port references
  */
 
-struct port_list {
+struct tipc_port_list {
        int count;
-       struct port_list *next;
+       struct tipc_port_list *next;
        u32 ports[PLSIZE];
 };
 
@@ -85,11 +83,13 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
        return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
 
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct port_list *pl_ptr);
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
+void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-int  tipc_bclink_init(void);
+void tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+void tipc_bclink_add_node(u32 addr);
+void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
 int  tipc_bclink_send_msg(struct sk_buff *buf);
index e2202de3d93e3cc8f1d836d1c51b430bfb28fd8f..329fb659fae4c2b89d4c0830466dab297906dc8c 100644 (file)
@@ -41,7 +41,7 @@
 
 #define MAX_ADDR_STR 32
 
-static struct media media_list[MAX_MEDIA];
+static struct tipc_media *media_list[MAX_MEDIA];
 static u32 media_count;
 
 struct tipc_bearer tipc_bearers[MAX_BEARERS];
@@ -65,17 +65,31 @@ static int media_name_valid(const char *name)
 }
 
 /**
- * media_find - locates specified media object by name
+ * tipc_media_find - locates specified media object by name
  */
 
-static struct media *media_find(const char *name)
+struct tipc_media *tipc_media_find(const char *name)
 {
-       struct media *m_ptr;
        u32 i;
 
-       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-               if (!strcmp(m_ptr->name, name))
-                       return m_ptr;
+       for (i = 0; i < media_count; i++) {
+               if (!strcmp(media_list[i]->name, name))
+                       return media_list[i];
+       }
+       return NULL;
+}
+
+/**
+ * media_find_id - locates specified media object by type identifier
+ */
+
+static struct tipc_media *media_find_id(u8 type)
+{
+       u32 i;
+
+       for (i = 0; i < media_count; i++) {
+               if (media_list[i]->type_id == type)
+                       return media_list[i];
        }
        return NULL;
 }
@@ -86,87 +100,34 @@ static struct media *media_find(const char *name)
  * Bearers for this media type must be activated separately at a later stage.
  */
 
-int  tipc_register_media(u32 media_type,
-                        char *name,
-                        int (*enable)(struct tipc_bearer *),
-                        void (*disable)(struct tipc_bearer *),
-                        int (*send_msg)(struct sk_buff *,
-                                        struct tipc_bearer *,
-                                        struct tipc_media_addr *),
-                        char *(*addr2str)(struct tipc_media_addr *a,
-                                          char *str_buf, int str_size),
-                        struct tipc_media_addr *bcast_addr,
-                        const u32 bearer_priority,
-                        const u32 link_tolerance,  /* [ms] */
-                        const u32 send_window_limit)
+int tipc_register_media(struct tipc_media *m_ptr)
 {
-       struct media *m_ptr;
-       u32 media_id;
-       u32 i;
        int res = -EINVAL;
 
        write_lock_bh(&tipc_net_lock);
 
-       if (tipc_mode != TIPC_NET_MODE) {
-               warn("Media <%s> rejected, not in networked mode yet\n", name);
+       if (!media_name_valid(m_ptr->name))
                goto exit;
-       }
-       if (!media_name_valid(name)) {
-               warn("Media <%s> rejected, illegal name\n", name);
+       if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
+           !m_ptr->bcast_addr.broadcast)
                goto exit;
-       }
-       if (!bcast_addr) {
-               warn("Media <%s> rejected, no broadcast address\n", name);
+       if (m_ptr->priority > TIPC_MAX_LINK_PRI)
                goto exit;
-       }
-       if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
-           (bearer_priority > TIPC_MAX_LINK_PRI)) {
-               warn("Media <%s> rejected, illegal priority (%u)\n", name,
-                    bearer_priority);
+       if ((m_ptr->tolerance < TIPC_MIN_LINK_TOL) ||
+           (m_ptr->tolerance > TIPC_MAX_LINK_TOL))
                goto exit;
-       }
-       if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
-           (link_tolerance > TIPC_MAX_LINK_TOL)) {
-               warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
-                    link_tolerance);
+       if (media_count >= MAX_MEDIA)
                goto exit;
-       }
-
-       media_id = media_count++;
-       if (media_id >= MAX_MEDIA) {
-               warn("Media <%s> rejected, media limit reached (%u)\n", name,
-                    MAX_MEDIA);
-               media_count--;
+       if (tipc_media_find(m_ptr->name) || media_find_id(m_ptr->type_id))
                goto exit;
-       }
-       for (i = 0; i < media_id; i++) {
-               if (media_list[i].type_id == media_type) {
-                       warn("Media <%s> rejected, duplicate type (%u)\n", name,
-                            media_type);
-                       media_count--;
-                       goto exit;
-               }
-               if (!strcmp(name, media_list[i].name)) {
-                       warn("Media <%s> rejected, duplicate name\n", name);
-                       media_count--;
-                       goto exit;
-               }
-       }
 
-       m_ptr = &media_list[media_id];
-       m_ptr->type_id = media_type;
-       m_ptr->send_msg = send_msg;
-       m_ptr->enable_bearer = enable;
-       m_ptr->disable_bearer = disable;
-       m_ptr->addr2str = addr2str;
-       memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
-       strcpy(m_ptr->name, name);
-       m_ptr->priority = bearer_priority;
-       m_ptr->tolerance = link_tolerance;
-       m_ptr->window = send_window_limit;
+       media_list[media_count] = m_ptr;
+       media_count++;
        res = 0;
 exit:
        write_unlock_bh(&tipc_net_lock);
+       if (res)
+               warn("Media <%s> registration error\n", m_ptr->name);
        return res;
 }
 
@@ -176,27 +137,19 @@ exit:
 
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 {
-       struct media *m_ptr;
-       u32 media_type;
-       u32 i;
+       char addr_str[MAX_ADDR_STR];
+       struct tipc_media *m_ptr;
 
-       media_type = ntohl(a->type);
-       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-               if (m_ptr->type_id == media_type)
-                       break;
-       }
-
-       if ((i < media_count) && (m_ptr->addr2str != NULL)) {
-               char addr_str[MAX_ADDR_STR];
+       m_ptr = media_find_id(a->media_id);
 
-               tipc_printf(pb, "%s(%s)", m_ptr->name,
-                           m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
-       else {
-               unchar *addr = (unchar *)&a->dev_addr;
+       if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
+               tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str);
+       else {
+               u32 i;
 
-               tipc_printf(pb, "UNKNOWN(%u)", media_type);
-               for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++)
-                       tipc_printf(pb, "-%02x", addr[i]);
+               tipc_printf(pb, "UNKNOWN(%u)", a->media_id);
+               for (i = 0; i < sizeof(a->value); i++)
+                       tipc_printf(pb, "-%02x", a->value[i]);
        }
 }
 
@@ -207,7 +160,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 struct sk_buff *tipc_media_get_names(void)
 {
        struct sk_buff *buf;
-       struct media *m_ptr;
        int i;
 
        buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
@@ -215,9 +167,10 @@ struct sk_buff *tipc_media_get_names(void)
                return NULL;
 
        read_lock_bh(&tipc_net_lock);
-       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-               tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
-                                   strlen(m_ptr->name) + 1);
+       for (i = 0; i < media_count; i++) {
+               tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
+                                   media_list[i]->name,
+                                   strlen(media_list[i]->name) + 1);
        }
        read_unlock_bh(&tipc_net_lock);
        return buf;
@@ -232,7 +185,7 @@ struct sk_buff *tipc_media_get_names(void)
  */
 
 static int bearer_name_validate(const char *name,
-                               struct bearer_name *name_parts)
+                               struct tipc_bearer_names *name_parts)
 {
        char name_copy[TIPC_MAX_BEARER_NAME];
        char *media_name;
@@ -276,10 +229,10 @@ static int bearer_name_validate(const char *name,
 }
 
 /**
- * bearer_find - locates bearer object with matching bearer name
+ * tipc_bearer_find - locates bearer object with matching bearer name
  */
 
-static struct tipc_bearer *bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(const char *name)
 {
        struct tipc_bearer *b_ptr;
        u32 i;
@@ -318,7 +271,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
 struct sk_buff *tipc_bearer_get_names(void)
 {
        struct sk_buff *buf;
-       struct media *m_ptr;
        struct tipc_bearer *b_ptr;
        int i, j;
 
@@ -327,10 +279,10 @@ struct sk_buff *tipc_bearer_get_names(void)
                return NULL;
 
        read_lock_bh(&tipc_net_lock);
-       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+       for (i = 0; i < media_count; i++) {
                for (j = 0; j < MAX_BEARERS; j++) {
                        b_ptr = &tipc_bearers[j];
-                       if (b_ptr->active && (b_ptr->media == m_ptr)) {
+                       if (b_ptr->active && (b_ptr->media == media_list[i])) {
                                tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
                                                    b_ptr->name,
                                                    strlen(b_ptr->name) + 1);
@@ -366,7 +318,7 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
 static int bearer_push(struct tipc_bearer *b_ptr)
 {
        u32 res = 0;
-       struct link *ln, *tln;
+       struct tipc_link *ln, *tln;
 
        if (b_ptr->blocked)
                return 0;
@@ -412,7 +364,8 @@ void tipc_continue(struct tipc_bearer *b_ptr)
  * bearer.lock is busy
  */
 
-static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
+                                               struct tipc_link *l_ptr)
 {
        list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -425,7 +378,7 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link
  * bearer.lock is free
  */
 
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
        spin_lock_bh(&b_ptr->lock);
        tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
@@ -438,7 +391,8 @@ void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
  * and if there is, try to resolve it before returning.
  * 'tipc_net_lock' is read_locked when this function is called
  */
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+                                       struct tipc_link *l_ptr)
 {
        int res = 1;
 
@@ -457,7 +411,7 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr
  * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
        if (unlikely(b_ptr->blocked))
                return 1;
@@ -473,8 +427,8 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
 int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 {
        struct tipc_bearer *b_ptr;
-       struct media *m_ptr;
-       struct bearer_name b_name;
+       struct tipc_media *m_ptr;
+       struct tipc_bearer_names b_names;
        char addr_string[16];
        u32 bearer_id;
        u32 with_this_prio;
@@ -486,7 +440,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
                     name);
                return -ENOPROTOOPT;
        }
-       if (!bearer_name_validate(name, &b_name)) {
+       if (!bearer_name_validate(name, &b_names)) {
                warn("Bearer <%s> rejected, illegal name\n", name);
                return -EINVAL;
        }
@@ -511,10 +465,10 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 
        write_lock_bh(&tipc_net_lock);
 
-       m_ptr = media_find(b_name.media_name);
+       m_ptr = tipc_media_find(b_names.media_name);
        if (!m_ptr) {
                warn("Bearer <%s> rejected, media <%s> not registered\n", name,
-                    b_name.media_name);
+                    b_names.media_name);
                goto exit;
        }
 
@@ -561,6 +515,8 @@ restart:
 
        b_ptr->identity = bearer_id;
        b_ptr->media = m_ptr;
+       b_ptr->tolerance = m_ptr->tolerance;
+       b_ptr->window = m_ptr->window;
        b_ptr->net_plane = bearer_id + 'A';
        b_ptr->active = 1;
        b_ptr->priority = priority;
@@ -590,11 +546,11 @@ exit:
 int tipc_block_bearer(const char *name)
 {
        struct tipc_bearer *b_ptr = NULL;
-       struct link *l_ptr;
-       struct link *temp_l_ptr;
+       struct tipc_link *l_ptr;
+       struct tipc_link *temp_l_ptr;
 
        read_lock_bh(&tipc_net_lock);
-       b_ptr = bearer_find(name);
+       b_ptr = tipc_bearer_find(name);
        if (!b_ptr) {
                warn("Attempt to block unknown bearer <%s>\n", name);
                read_unlock_bh(&tipc_net_lock);
@@ -625,8 +581,8 @@ int tipc_block_bearer(const char *name)
 
 static void bearer_disable(struct tipc_bearer *b_ptr)
 {
-       struct link *l_ptr;
-       struct link *temp_l_ptr;
+       struct tipc_link *l_ptr;
+       struct tipc_link *temp_l_ptr;
 
        info("Disabling bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
@@ -648,7 +604,7 @@ int tipc_disable_bearer(const char *name)
        int res;
 
        write_lock_bh(&tipc_net_lock);
-       b_ptr = bearer_find(name);
+       b_ptr = tipc_bearer_find(name);
        if (b_ptr == NULL) {
                warn("Attempt to disable unknown bearer <%s>\n", name);
                res = -EINVAL;
index d696f9e414e34ef4ef9798d4bfc143eef4f4e892..d3eac56b8c21abd1c0f0c0979f7c9f2a03261f44 100644 (file)
 #define MAX_BEARERS    2
 #define MAX_MEDIA      2
 
+/*
+ * Identifiers associated with TIPC message header media address info
+ *
+ * - address info field is 20 bytes long
+ * - media type identifier located at offset 3
+ * - remaining bytes vary according to media type
+ */
+
+#define TIPC_MEDIA_ADDR_SIZE   20
+#define TIPC_MEDIA_TYPE_OFFSET 3
+
 /*
  * Identifiers of supported TIPC media types
  */
 #define TIPC_MEDIA_TYPE_ETH    1
 
 /*
- * Destination address structure used by TIPC bearers when sending messages
- *
- * IMPORTANT: The fields of this structure MUST be stored using the specified
- * byte order indicated below, as the structure is exchanged between nodes
- * as part of a link setup process.
+ * struct tipc_media_addr - destination address used by TIPC bearers
+ * @value: address info (format defined by media)
+ * @media_id: TIPC media type identifier
+ * @broadcast: non-zero if address is a broadcast address
  */
+
 struct tipc_media_addr {
-       __be32  type;                   /* bearer type (network byte order) */
-       union {
-               __u8   eth_addr[6];     /* 48 bit Ethernet addr (byte array) */
-       } dev_addr;
+       u8 value[TIPC_MEDIA_ADDR_SIZE];
+       u8 media_id;
+       u8 broadcast;
 };
 
 struct tipc_bearer;
 
 /**
- * struct media - TIPC media information available to internal users
+ * struct tipc_media - TIPC media information available to internal users
  * @send_msg: routine which handles buffer transmission
  * @enable_bearer: routine which enables a bearer
  * @disable_bearer: routine which disables a bearer
- * @addr2str: routine which converts bearer's address to string form
+ * @addr2str: routine which converts media address to string
+ * @str2addr: routine which converts media address from string
+ * @addr2msg: routine which converts media address to protocol message area
+ * @msg2addr: routine which converts media address from protocol message area
  * @bcast_addr: media address used in broadcasting
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
@@ -77,14 +90,16 @@ struct tipc_bearer;
  * @name: media name
  */
 
-struct media {
+struct tipc_media {
        int (*send_msg)(struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
        int (*enable_bearer)(struct tipc_bearer *b_ptr);
        void (*disable_bearer)(struct tipc_bearer *b_ptr);
-       char *(*addr2str)(struct tipc_media_addr *a,
-                         char *str_buf, int str_size);
+       int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
+       int (*str2addr)(struct tipc_media_addr *a, char *str_buf);
+       int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
+       int (*msg2addr)(struct tipc_media_addr *a, char *msg_area);
        struct tipc_media_addr bcast_addr;
        u32 priority;
        u32 tolerance;
@@ -103,6 +118,8 @@ struct media {
  * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @priority: default link priority for bearer
+ * @window: default window size for bearer
+ * @tolerance: default link tolerance for bearer
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @links: list of non-congested links associated with bearer
@@ -122,10 +139,12 @@ struct tipc_bearer {
        struct tipc_media_addr addr;            /* initalized by media */
        char name[TIPC_MAX_BEARER_NAME];
        spinlock_t lock;
-       struct media *media;
+       struct tipc_media *media;
        u32 priority;
+       u32 window;
+       u32 tolerance;
        u32 identity;
-       struct link_req *link_req;
+       struct tipc_link_req *link_req;
        struct list_head links;
        struct list_head cong_links;
        int active;
@@ -133,28 +152,19 @@ struct tipc_bearer {
        struct tipc_node_map nodes;
 };
 
-struct bearer_name {
+struct tipc_bearer_names {
        char media_name[TIPC_MAX_MEDIA_NAME];
        char if_name[TIPC_MAX_IF_NAME];
 };
 
-struct link;
+struct tipc_link;
 
 extern struct tipc_bearer tipc_bearers[];
 
 /*
  * TIPC routines available to supported media types
  */
-int tipc_register_media(u32 media_type,
-                char *media_name, int (*enable)(struct tipc_bearer *),
-                void (*disable)(struct tipc_bearer *),
-                int (*send_msg)(struct sk_buff *,
-                       struct tipc_bearer *, struct tipc_media_addr *),
-                char *(*addr2str)(struct tipc_media_addr *a,
-                       char *str_buf, int str_size),
-                struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
-                const u32 link_tolerance,  /* [ms] */
-                const u32 send_window_limit);
+int tipc_register_media(struct tipc_media *m_ptr);
 
 void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 
@@ -170,16 +180,21 @@ int tipc_disable_bearer(const char *name);
 int  tipc_eth_media_start(void);
 void tipc_eth_media_stop(void);
 
+int tipc_media_set_priority(const char *name, u32 new_value);
+int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
 
 struct sk_buff *tipc_bearer_get_names(void);
 void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
 void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
+struct tipc_bearer *tipc_bearer_find(const char *name);
 struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_media *tipc_media_find(const char *name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+                                  struct tipc_link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
 void tipc_bearer_stop(void);
 void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
 
index b25a396b7e1ea30f1e1ba01eabe2889b6a0bd0fe..4785bf26cdf4d1b1ff98a97d7d0d1559859a5c5e 100644 (file)
@@ -184,13 +184,12 @@ static struct sk_buff *cfg_set_own_addr(void)
                                                   " (cannot change node address once assigned)");
 
        /*
-        * Must release all spinlocks before calling start_net() because
-        * Linux version of TIPC calls eth_media_start() which calls
-        * register_netdevice_notifier() which may block!
-        *
-        * Temporarily releasing the lock should be harmless for non-Linux TIPC,
-        * but Linux version of eth_media_start() should really be reworked
-        * so that it can be called with spinlocks held.
+        * Must temporarily release configuration spinlock while switching into
+        * networking mode as it calls tipc_eth_media_start(), which may sleep.
+        * Releasing the lock is harmless as other locally-issued configuration
+        * commands won't occur until this one completes, and remotely-issued
+        * configuration commands can't be received until a local configuration
+        * command to enable the first bearer is received and processed.
         */
 
        spin_unlock_bh(&config_lock);
index c21331d58fdb5d5839b1f9669757e45420dcb8fd..2691cd57b8a8244b89857c043631445d55d031ab 100644 (file)
@@ -99,8 +99,8 @@ struct sk_buff *tipc_buf_acquire(u32 size)
 
 static void tipc_core_stop_net(void)
 {
-       tipc_eth_media_stop();
        tipc_net_stop();
+       tipc_eth_media_stop();
 }
 
 /**
index f2fb96e86ee8efa094274af25c402965c7c5942e..a00e5f811569fd268ab3ffaed4cbd7824d5c1d71 100644 (file)
@@ -45,7 +45,7 @@
 
 
 /**
- * struct link_req - information about an ongoing link setup request
+ * struct tipc_link_req - information about an ongoing link setup request
  * @bearer: bearer issuing requests
  * @dest: destination address for request messages
  * @domain: network domain to which links can be established
@@ -54,7 +54,7 @@
  * @timer: timer governing period between requests
  * @timer_intv: current interval between requests (in ms)
  */
-struct link_req {
+struct tipc_link_req {
        struct tipc_bearer *bearer;
        struct tipc_media_addr dest;
        u32 domain;
@@ -84,7 +84,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
                msg_set_non_seq(msg, 1);
                msg_set_dest_domain(msg, dest_domain);
                msg_set_bc_netid(msg, tipc_net_id);
-               msg_set_media_addr(msg, &b_ptr->addr);
+               b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
        }
        return buf;
 }
@@ -120,7 +120,7 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
        struct tipc_node *n_ptr;
-       struct link *link;
+       struct tipc_link *link;
        struct tipc_media_addr media_addr, *addr;
        struct sk_buff *rbuf;
        struct tipc_msg *msg = buf_msg(buf);
@@ -130,12 +130,15 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
        u32 type = msg_type(msg);
        int link_fully_up;
 
-       msg_get_media_addr(msg, &media_addr);
+       media_addr.broadcast = 1;
+       b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
        buf_discard(buf);
 
        /* Validate discovery message from requesting node */
        if (net_id != tipc_net_id)
                return;
+       if (media_addr.broadcast)
+               return;
        if (!tipc_addr_domain_valid(dest))
                return;
        if (!tipc_addr_node_valid(orig))
@@ -215,7 +218,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
  * and is either not currently searching or is searching at a slow rate
  */
 
-static void disc_update(struct link_req *req)
+static void disc_update(struct tipc_link_req *req)
 {
        if (!req->num_nodes) {
                if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
@@ -231,7 +234,7 @@ static void disc_update(struct link_req *req)
  * @req: ptr to link request structure
  */
 
-void tipc_disc_add_dest(struct link_req *req)
+void tipc_disc_add_dest(struct tipc_link_req *req)
 {
        req->num_nodes++;
 }
@@ -241,7 +244,7 @@ void tipc_disc_add_dest(struct link_req *req)
  * @req: ptr to link request structure
  */
 
-void tipc_disc_remove_dest(struct link_req *req)
+void tipc_disc_remove_dest(struct tipc_link_req *req)
 {
        req->num_nodes--;
        disc_update(req);
@@ -252,7 +255,7 @@ void tipc_disc_remove_dest(struct link_req *req)
  * @req: ptr to link request structure
  */
 
-static void disc_send_msg(struct link_req *req)
+static void disc_send_msg(struct tipc_link_req *req)
 {
        if (!req->bearer->blocked)
                tipc_bearer_send(req->bearer, req->buf, &req->dest);
@@ -265,7 +268,7 @@ static void disc_send_msg(struct link_req *req)
  * Called whenever a link setup request timer associated with a bearer expires.
  */
 
-static void disc_timeout(struct link_req *req)
+static void disc_timeout(struct tipc_link_req *req)
 {
        int max_delay;
 
@@ -313,7 +316,7 @@ exit:
 int tipc_disc_create(struct tipc_bearer *b_ptr,
                     struct tipc_media_addr *dest, u32 dest_domain)
 {
-       struct link_req *req;
+       struct tipc_link_req *req;
 
        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
@@ -342,7 +345,7 @@ int tipc_disc_create(struct tipc_bearer *b_ptr,
  * @req: ptr to link request structure
  */
 
-void tipc_disc_delete(struct link_req *req)
+void tipc_disc_delete(struct tipc_link_req *req)
 {
        k_cancel_timer(&req->timer);
        k_term_timer(&req->timer);
index a3af595b86cb9cc7788278afbe387bf23f2937ad..75b67c403aa3629643fd547e4245c1c6dacbc335 100644 (file)
 #ifndef _TIPC_DISCOVER_H
 #define _TIPC_DISCOVER_H
 
-struct link_req;
+struct tipc_link_req;
 
 int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
                     u32 dest_domain);
-void tipc_disc_delete(struct link_req *req);
-void tipc_disc_add_dest(struct link_req *req);
-void tipc_disc_remove_dest(struct link_req *req);
+void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_add_dest(struct tipc_link_req *req);
+void tipc_disc_remove_dest(struct tipc_link_req *req);
 void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
 
 #endif
index e728d4ce2a1b4bd10aa1ff375fef4a5e7b5634a9..527e3f0e165d4190f7e46eafd66138efe6c18b6c 100644 (file)
 #include "bearer.h"
 
 #define MAX_ETH_BEARERS                MAX_BEARERS
-#define ETH_LINK_PRIORITY      TIPC_DEF_LINK_PRI
-#define ETH_LINK_TOLERANCE     TIPC_DEF_LINK_TOL
-#define ETH_LINK_WINDOW                TIPC_DEF_LINK_WIN
+
+#define ETH_ADDR_OFFSET        4       /* message header offset of MAC address */
 
 /**
  * struct eth_bearer - Ethernet bearer data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @cleanup: work item used when disabling bearer
  */
 
 struct eth_bearer {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
+       struct work_struct cleanup;
 };
 
+static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
 static struct notifier_block notifier;
 
+/**
+ * eth_media_addr_set - initialize Ethernet media address structure
+ *
+ * Media-dependent "value" field stores MAC address in first 6 bytes
+ * and zeroes out the remaining bytes.
+ */
+
+static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
+{
+       memcpy(a->value, mac, ETH_ALEN);
+       memset(a->value + ETH_ALEN, 0, sizeof(a->value) - ETH_ALEN);
+       a->media_id = TIPC_MEDIA_TYPE_ETH;
+       a->broadcast = !memcmp(mac, eth_media_info.bcast_addr.value, ETH_ALEN);
+}
+
 /**
  * send_msg - send a TIPC message out over an Ethernet interface
  */
@@ -85,7 +102,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 
        skb_reset_network_header(clone);
        clone->dev = dev;
-       dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+       dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
                        dev->dev_addr, clone->len);
        dev_queue_xmit(clone);
        return 0;
@@ -172,22 +189,41 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        tb_ptr->usr_handle = (void *)eb_ptr;
        tb_ptr->mtu = dev->mtu;
        tb_ptr->blocked = 0;
-       tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
-       memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
+       eth_media_addr_set(&tb_ptr->addr, (char *)dev->dev_addr);
        return 0;
 }
 
+/**
+ * cleanup_bearer - break association between Ethernet bearer and interface
+ *
+ * This routine must be invoked from a work queue because it can sleep.
+ */
+
+static void cleanup_bearer(struct work_struct *work)
+{
+       struct eth_bearer *eb_ptr =
+               container_of(work, struct eth_bearer, cleanup);
+
+       dev_remove_pack(&eb_ptr->tipc_packet_type);
+       dev_put(eb_ptr->dev);
+       eb_ptr->dev = NULL;
+}
+
 /**
  * disable_bearer - detach TIPC bearer from an Ethernet interface
  *
- * We really should do dev_remove_pack() here, but this function can not be
- * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
- * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit.
+ * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
+ * then get worker thread to complete bearer cleanup.  (Can't do cleanup
+ * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
 
 static void disable_bearer(struct tipc_bearer *tb_ptr)
 {
-       ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
+       struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+
+       eb_ptr->bearer = NULL;
+       INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+       schedule_work(&eb_ptr->cleanup);
 }
 
 /**
@@ -246,17 +282,81 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
  * eth_addr2str - convert Ethernet address to string
  */
 
-static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{
+       if (str_size < 18)      /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
+               return 1;
+
+       sprintf(str_buf, "%pM", a->value);
+       return 0;
+}
+
+/**
+ * eth_str2addr - convert string to Ethernet address
+ */
+
+static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
+{
+       char mac[ETH_ALEN];
+       int r;
+
+       r = sscanf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+                      (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2],
+                      (u32 *)&mac[3], (u32 *)&mac[4], (u32 *)&mac[5]);
+
+       if (r != ETH_ALEN)
+               return 1;
+
+       eth_media_addr_set(a, mac);
+       return 0;
+}
+
+/**
+ * eth_addr2msg - convert Ethernet address format to message header format
+ */
+
+static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
 {
-       unchar *addr = (unchar *)&a->dev_addr;
+       memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
+       msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+       memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
+       return 0;
+}
 
-       if (str_size < 18)
-               *str_buf = '\0';
-       else
-               sprintf(str_buf, "%pM", addr);
-       return str_buf;
+/**
+ * eth_msg2addr - convert message header address format to Ethernet format
+ */
+
+static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
+{
+       if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
+               return 1;
+
+       eth_media_addr_set(a, msg_area + ETH_ADDR_OFFSET);
+       return 0;
 }
 
+/*
+ * Ethernet media registration info
+ */
+
+static struct tipc_media eth_media_info = {
+       .send_msg       = send_msg,
+       .enable_bearer  = enable_bearer,
+       .disable_bearer = disable_bearer,
+       .addr2str       = eth_addr2str,
+       .str2addr       = eth_str2addr,
+       .addr2msg       = eth_addr2msg,
+       .msg2addr       = eth_msg2addr,
+       .bcast_addr     = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+                           TIPC_MEDIA_TYPE_ETH, 1 },
+       .priority       = TIPC_DEF_LINK_PRI,
+       .tolerance      = TIPC_DEF_LINK_TOL,
+       .window         = TIPC_DEF_LINK_WIN,
+       .type_id        = TIPC_MEDIA_TYPE_ETH,
+       .name           = "eth"
+};
+
 /**
  * tipc_eth_media_start - activate Ethernet bearer support
  *
@@ -266,21 +366,12 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
 
 int tipc_eth_media_start(void)
 {
-       struct tipc_media_addr bcast_addr;
        int res;
 
        if (eth_started)
                return -EINVAL;
 
-       bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
-       memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
-
-       memset(eth_bearers, 0, sizeof(eth_bearers));
-
-       res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
-                                 enable_bearer, disable_bearer, send_msg,
-                                 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
-                                 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
+       res = tipc_register_media(&eth_media_info);
        if (res)
                return res;
 
@@ -298,22 +389,10 @@ int tipc_eth_media_start(void)
 
 void tipc_eth_media_stop(void)
 {
-       int i;
-
        if (!eth_started)
                return;
 
+       flush_scheduled_work();
        unregister_netdevice_notifier(&notifier);
-       for (i = 0; i < MAX_ETH_BEARERS ; i++) {
-               if (eth_bearers[i].bearer) {
-                       eth_bearers[i].bearer->blocked = 1;
-                       eth_bearers[i].bearer = NULL;
-               }
-               if (eth_bearers[i].dev) {
-                       dev_remove_pack(&eth_bearers[i].tipc_packet_type);
-                       dev_put(eth_bearers[i].dev);
-               }
-       }
-       memset(&eth_bearers, 0, sizeof(eth_bearers));
        eth_started = 0;
 }
index ae98a72da11a5445a3fb7f4e32ef1258d3abadd5..ac1832a66f8af9a415144408b63ecf6bdfe01b73 100644 (file)
 #define START_CHANGEOVER 100000u
 
 /**
- * struct link_name - deconstructed link name
+ * struct tipc_link_name - deconstructed link name
  * @addr_local: network address of node at this end
  * @if_local: name of interface at this end
  * @addr_peer: network address of node at far end
  * @if_peer: name of interface at far end
  */
 
-struct link_name {
+struct tipc_link_name {
        u32 addr_local;
        char if_local[TIPC_MAX_IF_NAME];
        u32 addr_peer;
        char if_peer[TIPC_MAX_IF_NAME];
 };
 
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
-static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
+                                    struct sk_buff **buf);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
                                    u32 num_sect, unsigned int total_len,
                                    u32 destnode);
-static void link_check_defragm_bufs(struct link *l_ptr);
-static void link_state_event(struct link *l_ptr, u32 event);
-static void link_reset_statistics(struct link *l_ptr);
-static void link_print(struct link *l_ptr, const char *str);
-static void link_start(struct link *l_ptr);
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+static void link_check_defragm_bufs(struct tipc_link *l_ptr);
+static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static void link_reset_statistics(struct tipc_link *l_ptr);
+static void link_print(struct tipc_link *l_ptr, const char *str);
+static void link_start(struct tipc_link *l_ptr);
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 
 /*
  *  Simple link routines
@@ -110,7 +111,7 @@ static unsigned int align(unsigned int i)
        return (i + 3) & ~3u;
 }
 
-static void link_init_max_pkt(struct link *l_ptr)
+static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
        u32 max_pkt;
 
@@ -127,14 +128,14 @@ static void link_init_max_pkt(struct link *l_ptr)
        l_ptr->max_pkt_probes = 0;
 }
 
-static u32 link_next_sent(struct link *l_ptr)
+static u32 link_next_sent(struct tipc_link *l_ptr)
 {
        if (l_ptr->next_out)
-               return msg_seqno(buf_msg(l_ptr->next_out));
+               return buf_seqno(l_ptr->next_out);
        return mod(l_ptr->next_out_no);
 }
 
-static u32 link_last_sent(struct link *l_ptr)
+static u32 link_last_sent(struct tipc_link *l_ptr)
 {
        return mod(link_next_sent(l_ptr) - 1);
 }
@@ -143,28 +144,29 @@ static u32 link_last_sent(struct link *l_ptr)
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
 
-int tipc_link_is_up(struct link *l_ptr)
+int tipc_link_is_up(struct tipc_link *l_ptr)
 {
        if (!l_ptr)
                return 0;
        return link_working_working(l_ptr) || link_working_unknown(l_ptr);
 }
 
-int tipc_link_is_active(struct link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l_ptr)
 {
        return  (l_ptr->owner->active_links[0] == l_ptr) ||
                (l_ptr->owner->active_links[1] == l_ptr);
 }
 
 /**
- * link_name_validate - validate & (optionally) deconstruct link name
+ * link_name_validate - validate & (optionally) deconstruct tipc_link name
  * @name - ptr to link name string
  * @name_parts - ptr to area for link name components (or NULL if not needed)
  *
  * Returns 1 if link name is valid, otherwise 0.
  */
 
-static int link_name_validate(const char *name, struct link_name *name_parts)
+static int link_name_validate(const char *name,
+                               struct tipc_link_name *name_parts)
 {
        char name_copy[TIPC_MAX_LINK_NAME];
        char *addr_local;
@@ -238,7 +240,7 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
  * tipc_node_delete() is called.)
  */
 
-static void link_timeout(struct link *l_ptr)
+static void link_timeout(struct tipc_link *l_ptr)
 {
        tipc_node_lock(l_ptr->owner);
 
@@ -287,7 +289,7 @@ static void link_timeout(struct link *l_ptr)
        tipc_node_unlock(l_ptr->owner);
 }
 
-static void link_set_timer(struct link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *l_ptr, u32 time)
 {
        k_start_timer(&l_ptr->timer, time);
 }
@@ -301,11 +303,11 @@ static void link_set_timer(struct link *l_ptr, u32 time)
  * Returns pointer to link.
  */
 
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr)
 {
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_msg *msg;
        char *if_name;
        char addr_string[16];
@@ -343,7 +345,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->checkpoint = 1;
        l_ptr->peer_session = INVALID_SESSION;
        l_ptr->b_ptr = b_ptr;
-       link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+       link_set_supervision_props(l_ptr, b_ptr->tolerance);
        l_ptr->state = RESET_UNKNOWN;
 
        l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
@@ -355,7 +357,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
        strcpy((char *)msg_data(msg), if_name);
 
        l_ptr->priority = b_ptr->priority;
-       tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
+       tipc_link_set_queue_limits(l_ptr, b_ptr->window);
 
        link_init_max_pkt(l_ptr);
 
@@ -382,7 +384,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
  * to avoid a potential deadlock situation.
  */
 
-void tipc_link_delete(struct link *l_ptr)
+void tipc_link_delete(struct tipc_link *l_ptr)
 {
        if (!l_ptr) {
                err("Attempt to delete non-existent link\n");
@@ -401,7 +403,7 @@ void tipc_link_delete(struct link *l_ptr)
        kfree(l_ptr);
 }
 
-static void link_start(struct link *l_ptr)
+static void link_start(struct tipc_link *l_ptr)
 {
        tipc_node_lock(l_ptr->owner);
        link_state_event(l_ptr, STARTING_EVT);
@@ -418,7 +420,7 @@ static void link_start(struct link *l_ptr)
  * has abated.
  */
 
-static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 {
        struct tipc_port *p_ptr;
 
@@ -440,7 +442,7 @@ exit:
        return -ELINKCONG;
 }
 
-void tipc_link_wakeup_ports(struct link *l_ptr, int all)
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
 {
        struct tipc_port *p_ptr;
        struct tipc_port *temp_p_ptr;
@@ -475,7 +477,7 @@ exit:
  * @l_ptr: pointer to link
  */
 
-static void link_release_outqueue(struct link *l_ptr)
+static void link_release_outqueue(struct tipc_link *l_ptr)
 {
        struct sk_buff *buf = l_ptr->first_out;
        struct sk_buff *next;
@@ -494,7 +496,7 @@ static void link_release_outqueue(struct link *l_ptr)
  * @l_ptr: pointer to link
  */
 
-void tipc_link_reset_fragments(struct link *l_ptr)
+void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
        struct sk_buff *buf = l_ptr->defragm_buf;
        struct sk_buff *next;
@@ -512,7 +514,7 @@ void tipc_link_reset_fragments(struct link *l_ptr)
  * @l_ptr: pointer to link
  */
 
-void tipc_link_stop(struct link *l_ptr)
+void tipc_link_stop(struct tipc_link *l_ptr)
 {
        struct sk_buff *buf;
        struct sk_buff *next;
@@ -537,7 +539,7 @@ void tipc_link_stop(struct link *l_ptr)
        l_ptr->proto_msg_queue = NULL;
 }
 
-void tipc_link_reset(struct link *l_ptr)
+void tipc_link_reset(struct tipc_link *l_ptr)
 {
        struct sk_buff *buf;
        u32 prev_state = l_ptr->state;
@@ -597,7 +599,7 @@ void tipc_link_reset(struct link *l_ptr)
 }
 
 
-static void link_activate(struct link *l_ptr)
+static void link_activate(struct tipc_link *l_ptr)
 {
        l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
        tipc_node_link_up(l_ptr->owner, l_ptr);
@@ -610,9 +612,9 @@ static void link_activate(struct link *l_ptr)
  * @event: state machine event to process
  */
 
-static void link_state_event(struct link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned event)
 {
-       struct link *other;
+       struct tipc_link *other;
        u32 cont_intv = l_ptr->continuity_interval;
 
        if (!l_ptr->started && (event != STARTING_EVT))
@@ -784,7 +786,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
  * the tail of an existing one.
  */
 
-static int link_bundle_buf(struct link *l_ptr,
+static int link_bundle_buf(struct tipc_link *l_ptr,
                           struct sk_buff *bundler,
                           struct sk_buff *buf)
 {
@@ -813,7 +815,7 @@ static int link_bundle_buf(struct link *l_ptr,
        return 1;
 }
 
-static void link_add_to_outqueue(struct link *l_ptr,
+static void link_add_to_outqueue(struct tipc_link *l_ptr,
                                 struct sk_buff *buf,
                                 struct tipc_msg *msg)
 {
@@ -834,7 +836,7 @@ static void link_add_to_outqueue(struct link *l_ptr,
                l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 }
 
-static void link_add_chain_to_outqueue(struct link *l_ptr,
+static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
                                       struct sk_buff *buf_chain,
                                       u32 long_msgno)
 {
@@ -859,7 +861,7 @@ static void link_add_chain_to_outqueue(struct link *l_ptr,
  * has failed, and from link_send()
  */
 
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        u32 size = msg_size(msg);
@@ -954,7 +956,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 {
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
        int res = -ELINKCONG;
 
@@ -988,7 +990,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 void tipc_link_send_names(struct list_head *message_list, u32 dest)
 {
        struct tipc_node *n_ptr;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct sk_buff *buf;
        struct sk_buff *temp_buf;
 
@@ -1027,7 +1029,7 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
  * Link is locked. Returns user data length.
  */
 
-static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
                              u32 *used_max_pkt)
 {
        struct tipc_msg *msg = buf_msg(buf);
@@ -1061,7 +1063,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
  */
 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
 {
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
        int res;
        u32 selector = msg_origport(buf_msg(buf)) & 1;
@@ -1100,7 +1102,7 @@ int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->phdr;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct sk_buff *buf;
        struct tipc_node *node;
        int res;
@@ -1195,7 +1197,7 @@ static int link_send_sections_long(struct tipc_port *sender,
                                   unsigned int total_len,
                                   u32 destaddr)
 {
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_node *node;
        struct tipc_msg *hdr = &sender->phdr;
        u32 dsz = total_len;
@@ -1342,7 +1344,7 @@ reject:
 /*
  * tipc_link_push_packet: Push one unsent packet to the media
  */
-u32 tipc_link_push_packet(struct link *l_ptr)
+u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 {
        struct sk_buff *buf = l_ptr->first_out;
        u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1354,7 +1356,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
        if (r_q_size && buf) {
                u32 last = lesser(mod(r_q_head + r_q_size),
                                  link_last_sent(l_ptr));
-               u32 first = msg_seqno(buf_msg(buf));
+               u32 first = buf_seqno(buf);
 
                while (buf && less(first, r_q_head)) {
                        first = mod(first + 1);
@@ -1403,7 +1405,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
        if (buf) {
                struct tipc_msg *msg = buf_msg(buf);
                u32 next = msg_seqno(msg);
-               u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+               u32 first = buf_seqno(l_ptr->first_out);
 
                if (mod(next - first) < l_ptr->queue_limit[0]) {
                        msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1426,7 +1428,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
  * push_queue(): push out the unsent messages of a link where
  *               congestion has abated. Node is locked
  */
-void tipc_link_push_queue(struct link *l_ptr)
+void tipc_link_push_queue(struct tipc_link *l_ptr)
 {
        u32 res;
 
@@ -1470,7 +1472,8 @@ static void link_reset_all(unsigned long addr)
        read_unlock_bh(&tipc_net_lock);
 }
 
-static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l_ptr,
+                                       struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
 
@@ -1514,7 +1517,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
        }
 }
 
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
                          u32 retransmits)
 {
        struct tipc_msg *msg;
@@ -1558,7 +1561,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
                } else {
                        tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
                        l_ptr->stats.bearer_congs++;
-                       l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+                       l_ptr->retransm_queue_head = buf_seqno(buf);
                        l_ptr->retransm_queue_size = retransmits;
                        return;
                }
@@ -1571,7 +1574,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
  * link_insert_deferred_queue - insert deferred messages back into receive chain
  */
 
-static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
+static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
                                                  struct sk_buff *buf)
 {
        u32 seq_no;
@@ -1579,7 +1582,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
        if (l_ptr->oldest_deferred_in == NULL)
                return buf;
 
-       seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+       seq_no = buf_seqno(l_ptr->oldest_deferred_in);
        if (seq_no == mod(l_ptr->next_in_no)) {
                l_ptr->newest_deferred_in->next = buf;
                buf = l_ptr->oldest_deferred_in;
@@ -1653,7 +1656,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
        read_lock_bh(&tipc_net_lock);
        while (head) {
                struct tipc_node *n_ptr;
-               struct link *l_ptr;
+               struct tipc_link *l_ptr;
                struct sk_buff *crs;
                struct sk_buff *buf = head;
                struct tipc_msg *msg;
@@ -1733,14 +1736,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 
                /* Release acked messages */
 
-               if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
-                       if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
-                               tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-               }
+               if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
+                       tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
                crs = l_ptr->first_out;
                while ((crs != l_ptr->next_out) &&
-                      less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+                      less_eq(buf_seqno(crs), ackd)) {
                        struct sk_buff *next = crs->next;
 
                        buf_discard(crs);
@@ -1863,7 +1864,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
 {
        struct sk_buff *prev = NULL;
        struct sk_buff *crs = *head;
-       u32 seq_no = msg_seqno(buf_msg(buf));
+       u32 seq_no = buf_seqno(buf);
 
        buf->next = NULL;
 
@@ -1874,7 +1875,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
        }
 
        /* Last ? */
-       if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+       if (less(buf_seqno(*tail), seq_no)) {
                (*tail)->next = buf;
                *tail = buf;
                return 1;
@@ -1908,10 +1909,10 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
  */
 
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf)
 {
-       u32 seq_no = msg_seqno(buf_msg(buf));
+       u32 seq_no = buf_seqno(buf);
 
        if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
                link_recv_proto_msg(l_ptr, buf);
@@ -1946,8 +1947,9 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
 /*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
-                             u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
+                               int probe_msg, u32 gap, u32 tolerance,
+                               u32 priority, u32 ack_mtu)
 {
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
@@ -1973,10 +1975,10 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                if (!tipc_link_is_up(l_ptr))
                        return;
                if (l_ptr->next_out)
-                       next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+                       next_sent = buf_seqno(l_ptr->next_out);
                msg_set_next_sent(msg, next_sent);
                if (l_ptr->oldest_deferred_in) {
-                       u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+                       u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
                        gap = mod(rec - mod(l_ptr->next_in_no));
                }
                msg_set_seq_gap(msg, gap);
@@ -2064,7 +2066,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
  * change at any time. The node with lowest address rules
  */
 
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        u32 rec_gap = 0;
        u32 max_pkt_info;
@@ -2197,12 +2199,12 @@ exit:
  * tipc_link_tunnel(): Send one message via a link belonging to
  * another bearer. Owner node is locked.
  */
-static void tipc_link_tunnel(struct link *l_ptr,
+static void tipc_link_tunnel(struct tipc_link *l_ptr,
                             struct tipc_msg *tunnel_hdr,
                             struct tipc_msg  *msg,
                             u32 selector)
 {
-       struct link *tunnel;
+       struct tipc_link *tunnel;
        struct sk_buff *buf;
        u32 length = msg_size(msg);
 
@@ -2231,11 +2233,11 @@ static void tipc_link_tunnel(struct link *l_ptr,
  *               Owner node is locked.
  */
 
-void tipc_link_changeover(struct link *l_ptr)
+void tipc_link_changeover(struct tipc_link *l_ptr)
 {
        u32 msgcount = l_ptr->out_queue_size;
        struct sk_buff *crs = l_ptr->first_out;
-       struct link *tunnel = l_ptr->owner->active_links[0];
+       struct tipc_link *tunnel = l_ptr->owner->active_links[0];
        struct tipc_msg tunnel_hdr;
        int split_bundles;
 
@@ -2294,7 +2296,7 @@ void tipc_link_changeover(struct link *l_ptr)
        }
 }
 
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
 {
        struct sk_buff *iter;
        struct tipc_msg tunnel_hdr;
@@ -2358,11 +2360,11 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
  *  via other link. Node is locked. Return extracted buffer.
  */
 
-static int link_recv_changeover_msg(struct link **l_ptr,
+static int link_recv_changeover_msg(struct tipc_link **l_ptr,
                                    struct sk_buff **buf)
 {
        struct sk_buff *tunnel_buf = *buf;
-       struct link *dest_link;
+       struct tipc_link *dest_link;
        struct tipc_msg *msg;
        struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
        u32 msg_typ = msg_type(tunnel_msg);
@@ -2462,7 +2464,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
  * The buffer is complete, inclusive total message length.
  * Returns user data length.
  */
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        struct sk_buff *buf_chain = NULL;
        struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2591,7 +2593,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
 
        /* Is there an incomplete message waiting for this fragment? */
 
-       while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
+       while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
                        (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
                prev = pbuf;
                pbuf = pbuf->next;
@@ -2658,7 +2660,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
  * @l_ptr: pointer to link
  */
 
-static void link_check_defragm_bufs(struct link *l_ptr)
+static void link_check_defragm_bufs(struct tipc_link *l_ptr)
 {
        struct sk_buff *prev = NULL;
        struct sk_buff *next = NULL;
@@ -2688,7 +2690,7 @@ static void link_check_defragm_bufs(struct link *l_ptr)
 
 
 
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
 {
        if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
                return;
@@ -2700,7 +2702,7 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
 }
 
 
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 {
        /* Data messages from this node, inclusive FIRST_FRAGM */
        l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
@@ -2730,11 +2732,12 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
  * Returns pointer to link (or 0 if invalid link name).
  */
 
-static struct link *link_find_link(const char *name, struct tipc_node **node)
+static struct tipc_link *link_find_link(const char *name,
+                                       struct tipc_node **node)
 {
-       struct link_name link_name_parts;
+       struct tipc_link_name link_name_parts;
        struct tipc_bearer *b_ptr;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
 
        if (!link_name_validate(name, &link_name_parts))
                return NULL;
@@ -2754,13 +2757,113 @@ static struct link *link_find_link(const char *name, struct tipc_node **node)
        return l_ptr;
 }
 
+/**
+ * link_value_is_valid -- validate proposed link tolerance/priority/window
+ *
+ * @cmd - value type (TIPC_CMD_SET_LINK_*)
+ * @new_value - the new value
+ *
+ * Returns 1 if value is within range, 0 if not.
+ */
+
+static int link_value_is_valid(u16 cmd, u32 new_value)
+{
+       switch (cmd) {
+       case TIPC_CMD_SET_LINK_TOL:
+               return (new_value >= TIPC_MIN_LINK_TOL) &&
+                       (new_value <= TIPC_MAX_LINK_TOL);
+       case TIPC_CMD_SET_LINK_PRI:
+               return (new_value <= TIPC_MAX_LINK_PRI);
+       case TIPC_CMD_SET_LINK_WINDOW:
+               return (new_value >= TIPC_MIN_LINK_WIN) &&
+                       (new_value <= TIPC_MAX_LINK_WIN);
+       }
+       return 0;
+}
+
+
+/**
+ * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @name - ptr to link, bearer, or media name
+ * @new_value - new value of link, bearer, or media setting
+ * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ *
+ * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
+ *
+ * Returns 0 if value updated and negative value on error.
+ */
+
+static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+{
+       struct tipc_node *node;
+       struct tipc_link *l_ptr;
+       struct tipc_bearer *b_ptr;
+       struct tipc_media *m_ptr;
+
+       l_ptr = link_find_link(name, &node);
+       if (l_ptr) {
+               /*
+                * acquire node lock for tipc_link_send_proto_msg().
+                * see "TIPC locking policy" in net.c.
+                */
+               tipc_node_lock(node);
+               switch (cmd) {
+               case TIPC_CMD_SET_LINK_TOL:
+                       link_set_supervision_props(l_ptr, new_value);
+                       tipc_link_send_proto_msg(l_ptr,
+                               STATE_MSG, 0, 0, new_value, 0, 0);
+                       break;
+               case TIPC_CMD_SET_LINK_PRI:
+                       l_ptr->priority = new_value;
+                       tipc_link_send_proto_msg(l_ptr,
+                               STATE_MSG, 0, 0, 0, new_value, 0);
+                       break;
+               case TIPC_CMD_SET_LINK_WINDOW:
+                       tipc_link_set_queue_limits(l_ptr, new_value);
+                       break;
+               }
+               tipc_node_unlock(node);
+               return 0;
+       }
+
+       b_ptr = tipc_bearer_find(name);
+       if (b_ptr) {
+               switch (cmd) {
+               case TIPC_CMD_SET_LINK_TOL:
+                       b_ptr->tolerance = new_value;
+                       return 0;
+               case TIPC_CMD_SET_LINK_PRI:
+                       b_ptr->priority = new_value;
+                       return 0;
+               case TIPC_CMD_SET_LINK_WINDOW:
+                       b_ptr->window = new_value;
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       m_ptr = tipc_media_find(name);
+       if (!m_ptr)
+               return -ENODEV;
+       switch (cmd) {
+       case TIPC_CMD_SET_LINK_TOL:
+               m_ptr->tolerance = new_value;
+               return 0;
+       case TIPC_CMD_SET_LINK_PRI:
+               m_ptr->priority = new_value;
+               return 0;
+       case TIPC_CMD_SET_LINK_WINDOW:
+               m_ptr->window = new_value;
+               return 0;
+       }
+       return -EINVAL;
+}
+
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
                                     u16 cmd)
 {
        struct tipc_link_config *args;
        u32 new_value;
-       struct link *l_ptr;
-       struct tipc_node *node;
        int res;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
@@ -2769,6 +2872,10 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
        args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
        new_value = ntohl(args->value);
 
+       if (!link_value_is_valid(cmd, new_value))
+               return tipc_cfg_reply_error_string(
+                       "cannot change, value invalid");
+
        if (!strcmp(args->name, tipc_bclink_name)) {
                if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
                    (tipc_bclink_set_queue_limits(new_value) == 0))
@@ -2778,43 +2885,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
        }
 
        read_lock_bh(&tipc_net_lock);
-       l_ptr = link_find_link(args->name, &node);
-       if (!l_ptr) {
-               read_unlock_bh(&tipc_net_lock);
-               return tipc_cfg_reply_error_string("link not found");
-       }
-
-       tipc_node_lock(node);
-       res = -EINVAL;
-       switch (cmd) {
-       case TIPC_CMD_SET_LINK_TOL:
-               if ((new_value >= TIPC_MIN_LINK_TOL) &&
-                   (new_value <= TIPC_MAX_LINK_TOL)) {
-                       link_set_supervision_props(l_ptr, new_value);
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                0, 0, new_value, 0, 0);
-                       res = 0;
-               }
-               break;
-       case TIPC_CMD_SET_LINK_PRI:
-               if ((new_value >= TIPC_MIN_LINK_PRI) &&
-                   (new_value <= TIPC_MAX_LINK_PRI)) {
-                       l_ptr->priority = new_value;
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                0, 0, 0, new_value, 0);
-                       res = 0;
-               }
-               break;
-       case TIPC_CMD_SET_LINK_WINDOW:
-               if ((new_value >= TIPC_MIN_LINK_WIN) &&
-                   (new_value <= TIPC_MAX_LINK_WIN)) {
-                       tipc_link_set_queue_limits(l_ptr, new_value);
-                       res = 0;
-               }
-               break;
-       }
-       tipc_node_unlock(node);
-
+       res = link_cmd_set_value(args->name, new_value, cmd);
        read_unlock_bh(&tipc_net_lock);
        if (res)
                return tipc_cfg_reply_error_string("cannot change link setting");
@@ -2827,7 +2898,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
  * @l_ptr: pointer to link
  */
 
-static void link_reset_statistics(struct link *l_ptr)
+static void link_reset_statistics(struct tipc_link *l_ptr)
 {
        memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
        l_ptr->stats.sent_info = l_ptr->next_out_no;
@@ -2837,7 +2908,7 @@ static void link_reset_statistics(struct link *l_ptr)
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
 {
        char *link_name;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_node *node;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
@@ -2885,7 +2956,7 @@ static u32 percent(u32 count, u32 total)
 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 {
        struct print_buf pb;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct tipc_node *node;
        char *status;
        u32 profile_total = 0;
@@ -3007,7 +3078,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 {
        struct tipc_node *n_ptr;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        u32 res = MAX_PKT_DEFAULT;
 
        if (dest == tipc_own_addr)
@@ -3026,7 +3097,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
        return res;
 }
 
-static void link_print(struct link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l_ptr, const char *str)
 {
        char print_area[256];
        struct print_buf pb;
@@ -3046,13 +3117,12 @@ static void link_print(struct link *l_ptr, const char *str)
        tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
        tipc_printf(buf, "SQUE");
        if (l_ptr->first_out) {
-               tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+               tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
                if (l_ptr->next_out)
-                       tipc_printf(buf, "%u..",
-                                   msg_seqno(buf_msg(l_ptr->next_out)));
-               tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
-               if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
-                        msg_seqno(buf_msg(l_ptr->first_out)))
+                       tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
+               tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
+               if ((mod(buf_seqno(l_ptr->last_out) -
+                        buf_seqno(l_ptr->first_out))
                     != (l_ptr->out_queue_size - 1)) ||
                    (l_ptr->last_out->next != NULL)) {
                        tipc_printf(buf, "\nSend queue inconsistency\n");
@@ -3064,8 +3134,8 @@ static void link_print(struct link *l_ptr, const char *str)
                tipc_printf(buf, "[]");
        tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
        if (l_ptr->oldest_deferred_in) {
-               u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
-               u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+               u32 o = buf_seqno(l_ptr->oldest_deferred_in);
+               u32 n = buf_seqno(l_ptr->newest_deferred_in);
                tipc_printf(buf, ":RQUE[%u..%u]", o, n);
                if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
                        tipc_printf(buf, ":RQSIZ(%u)",
index e56cb532913e0fa72f278c90aea1ecbc7846cce8..73c18c140e1d19009d0ea27f83706c8889bcca72 100644 (file)
 #define PUSH_FAILED   1
 #define PUSH_FINISHED 2
 
+/*
+ * Out-of-range value for link sequence numbers
+ */
+
+#define INVALID_LINK_SEQ 0x10000
+
 /*
  * Link states
  */
@@ -61,7 +67,7 @@
 #define MAX_PKT_DEFAULT 1500
 
 /**
- * struct link - TIPC link data structure
+ * struct tipc_link - TIPC link data structure
  * @addr: network address of link's peer node
  * @name: link name character string
  * @media_addr: media address to use when sending messages over link
  * @stats: collects statistics regarding link activity
  */
 
-struct link {
+struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct tipc_media_addr media_addr;
@@ -207,24 +213,24 @@ struct link {
 
 struct tipc_port;
 
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct link *l_ptr);
-void tipc_link_changeover(struct link *l_ptr);
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
-void tipc_link_reset_fragments(struct link *l_ptr);
-int tipc_link_is_up(struct link *l_ptr);
-int tipc_link_is_active(struct link *l_ptr);
-u32 tipc_link_push_packet(struct link *l_ptr);
-void tipc_link_stop(struct link *l_ptr);
+void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_changeover(struct tipc_link *l_ptr);
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *dest);
+void tipc_link_reset_fragments(struct tipc_link *l_ptr);
+int tipc_link_is_up(struct tipc_link *l_ptr);
+int tipc_link_is_active(struct tipc_link *l_ptr);
+u32 tipc_link_push_packet(struct tipc_link *l_ptr);
+void tipc_link_stop(struct tipc_link *l_ptr);
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
-void tipc_link_reset(struct link *l_ptr);
+void tipc_link_reset(struct tipc_link *l_ptr);
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
 void tipc_link_send_names(struct list_head *message_list, u32 dest);
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
@@ -235,19 +241,26 @@ void tipc_link_recv_bundle(struct sk_buff *buf);
 int  tipc_link_recv_fragment(struct sk_buff **pending,
                             struct sk_buff **fb,
                             struct tipc_msg **msg);
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
-                             u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct link *l_ptr);
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+                             u32 gap, u32 tolerance, u32 priority,
+                             u32 acked_mtu);
+void tipc_link_push_queue(struct tipc_link *l_ptr);
 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
                   struct sk_buff *buf);
-void tipc_link_wakeup_ports(struct link *l_ptr, int all);
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
+void tipc_link_retransmit(struct tipc_link *l_ptr,
+                         struct sk_buff *start, u32 retransmits);
 
 /*
  * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
  */
 
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+       return msg_seqno(buf_msg(buf));
+}
+
 static inline u32 mod(u32 x)
 {
        return x & 0xffffu;
@@ -282,32 +295,32 @@ static inline u32 lesser(u32 left, u32 right)
  * Link status checking routines
  */
 
-static inline int link_working_working(struct link *l_ptr)
+static inline int link_working_working(struct tipc_link *l_ptr)
 {
        return l_ptr->state == WORKING_WORKING;
 }
 
-static inline int link_working_unknown(struct link *l_ptr)
+static inline int link_working_unknown(struct tipc_link *l_ptr)
 {
        return l_ptr->state == WORKING_UNKNOWN;
 }
 
-static inline int link_reset_unknown(struct link *l_ptr)
+static inline int link_reset_unknown(struct tipc_link *l_ptr)
 {
        return l_ptr->state == RESET_UNKNOWN;
 }
 
-static inline int link_reset_reset(struct link *l_ptr)
+static inline int link_reset_reset(struct tipc_link *l_ptr)
 {
        return l_ptr->state == RESET_RESET;
 }
 
-static inline int link_blocked(struct link *l_ptr)
+static inline int link_blocked(struct tipc_link *l_ptr)
 {
        return l_ptr->exp_msg_count || l_ptr->blocked;
 }
 
-static inline int link_congested(struct link *l_ptr)
+static inline int link_congested(struct tipc_link *l_ptr)
 {
        return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
 }
index 83d50967910c6a61bf276742aeee957959796406..3e4d3e29be61fd516a296553b035bee0bba85ca3 100644 (file)
@@ -333,11 +333,14 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
        }
 
        if (msg_user(msg) ==  LINK_CONFIG) {
-               u32 *raw = (u32 *)msg;
-               struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
+               struct tipc_media_addr orig;
+
                tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
                tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
-               tipc_media_addr_printf(buf, orig);
+               memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
+               orig.media_id = 0;
+               orig.broadcast = 0;
+               tipc_media_addr_printf(buf, &orig);
        }
        if (msg_user(msg) == BCAST_PROTOCOL) {
                tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
index d93178f2e852eb0c77344d7ba3e29aa616dcce20..7b0cda1671072088a9c97af7b5b8aa402c0540d7 100644 (file)
@@ -78,6 +78,8 @@
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
 
+#define TIPC_MEDIA_ADDR_OFFSET 5
+
 
 struct tipc_msg {
        __be32 hdr[15];
@@ -682,6 +684,10 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
        msg_set_bits(m, 5, 12, 0x1, r);
 }
 
+static inline char *msg_media_addr(struct tipc_msg *m)
+{
+       return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
+}
 
 /*
  * Word 9
@@ -734,14 +740,4 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
                   u32 num_sect, unsigned int total_len,
                            int max_size, int usrmem, struct sk_buff **buf);
 
-static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-       memcpy(&((int *)m)[5], a, sizeof(*a));
-}
-
-static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-       memcpy(a, &((int *)m)[5], sizeof(*a));
-}
-
 #endif
index b7ca1bd7b1517dbc7f20d9097ea8139bbe13fe8d..98ebb37f1808cd126809659dd6b6bd549b184b1e 100644 (file)
@@ -176,7 +176,7 @@ void tipc_named_withdraw(struct publication *publ)
 void tipc_named_node_up(unsigned long nodearg)
 {
        struct tipc_node *n_ptr;
-       struct link *l_ptr;
+       struct tipc_link *l_ptr;
        struct publication *publ;
        struct distr_item *item = NULL;
        struct sk_buff *buf = NULL;
@@ -322,10 +322,9 @@ void tipc_named_recv(struct sk_buff *buf)
 /**
  * tipc_named_reinit - re-initialize local publication list
  *
- * This routine is called whenever TIPC networking is (re)enabled.
+ * This routine is called whenever TIPC networking is enabled.
  * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's current network address.
- * (If the node's address is unchanged, the update loop terminates immediately.)
+ * are updated to reflect the node's new network address.
  */
 
 void tipc_named_reinit(void)
@@ -333,10 +332,9 @@ void tipc_named_reinit(void)
        struct publication *publ;
 
        write_lock_bh(&tipc_nametbl_lock);
-       list_for_each_entry(publ, &publ_root, local_list) {
-               if (publ->node == tipc_own_addr)
-                       break;
+
+       list_for_each_entry(publ, &publ_root, local_list)
                publ->node = tipc_own_addr;
-       }
+
        write_unlock_bh(&tipc_nametbl_lock);
 }
index 46e6b6c2ecc9fe3e78e662d1506ce545a126d028..89eb5621ebbad423edfe8c6a8f112f4a21555b91 100644 (file)
@@ -251,8 +251,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
                                                    u32 type, u32 lower, u32 upper,
                                                    u32 scope, u32 node, u32 port, u32 key)
 {
-       struct subscription *s;
-       struct subscription *st;
+       struct tipc_subscription *s;
+       struct tipc_subscription *st;
        struct publication *publ;
        struct sub_seq *sseq;
        struct name_info *info;
@@ -381,7 +381,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
        struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
        struct name_info *info;
        struct sub_seq *free;
-       struct subscription *s, *st;
+       struct tipc_subscription *s, *st;
        int removed_subseq = 0;
 
        if (!sseq)
@@ -448,7 +448,8 @@ found:
  * sequence overlapping with the requested sequence
  */
 
-static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+static void tipc_nameseq_subscribe(struct name_seq *nseq,
+                                       struct tipc_subscription *s)
 {
        struct sub_seq *sseq = nseq->sseqs;
 
@@ -625,7 +626,7 @@ not_found:
  */
 
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                             struct port_list *dports)
+                             struct tipc_port_list *dports)
 {
        struct name_seq *seq;
        struct sub_seq *sseq;
@@ -739,7 +740,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
  * tipc_nametbl_subscribe - add a subscription object to the name table
  */
 
-void tipc_nametbl_subscribe(struct subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
        u32 type = s->seq.type;
        struct name_seq *seq;
@@ -763,7 +764,7 @@ void tipc_nametbl_subscribe(struct subscription *s)
  * tipc_nametbl_unsubscribe - remove a subscription object from name table
  */
 
-void tipc_nametbl_unsubscribe(struct subscription *s)
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
        struct name_seq *seq;
 
index 62d77e5e902e3de5df72c88630e1d9b3b1ebc5f1..8086b42f92ad0a9021d97caeccd694475086a073 100644 (file)
@@ -39,8 +39,8 @@
 
 #include "node_subscr.h"
 
-struct subscription;
-struct port_list;
+struct tipc_subscription;
+struct tipc_port_list;
 
 /*
  * TIPC name types reserved for internal TIPC use (both current and planned)
@@ -90,7 +90,7 @@ extern rwlock_t tipc_nametbl_lock;
 struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
 u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                        struct port_list *dports);
+                        struct tipc_port_list *dports);
 int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
                        struct tipc_name_seq const *seq);
 struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
@@ -100,8 +100,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
                                        u32 scope, u32 node, u32 ref, u32 key);
 struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
                                        u32 node, u32 ref, u32 key);
-void tipc_nametbl_subscribe(struct subscription *s);
-void tipc_nametbl_unsubscribe(struct subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(void);
 void tipc_nametbl_stop(void);
 
index fafef6c3c0f65088dde6ca402ec74e19b2f15183..61afee7e82912ef4257146265e75989704f4dc27 100644 (file)
@@ -174,7 +174,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
 int tipc_net_start(u32 addr)
 {
        char addr_string[16];
-       int res;
 
        if (tipc_mode != TIPC_NODE_MODE)
                return -ENOPROTOOPT;
@@ -187,9 +186,7 @@ int tipc_net_start(u32 addr)
        tipc_named_reinit();
        tipc_port_reinit();
 
-       res = tipc_bclink_init();
-       if (res)
-               return res;
+       tipc_bclink_init();
 
        tipc_k_signal((Handler)tipc_subscr_start, 0);
        tipc_k_signal((Handler)tipc_cfg_init, 0);
@@ -207,8 +204,8 @@ void tipc_net_stop(void)
        if (tipc_mode != TIPC_NET_MODE)
                return;
        write_lock_bh(&tipc_net_lock);
-       tipc_bearer_stop();
        tipc_mode = TIPC_NODE_MODE;
+       tipc_bearer_stop();
        tipc_bclink_stop();
        list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
                tipc_node_delete(node);
index 27b4bb0cca6c4c58dba468ba90d7fe02be8e3c8a..6b226faad89fb30790639bb68df1f672aadf03ab 100644 (file)
@@ -136,9 +136,9 @@ void tipc_node_delete(struct tipc_node *n_ptr)
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
 
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-       struct link **active = &n_ptr->active_links[0];
+       struct tipc_link **active = &n_ptr->active_links[0];
 
        n_ptr->working_links++;
 
@@ -171,14 +171,14 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
 
 static void node_select_active_links(struct tipc_node *n_ptr)
 {
-       struct link **active = &n_ptr->active_links[0];
+       struct tipc_link **active = &n_ptr->active_links[0];
        u32 i;
        u32 highest_prio = 0;
 
        active[0] = active[1] = NULL;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               struct link *l_ptr = n_ptr->links[i];
+               struct tipc_link *l_ptr = n_ptr->links[i];
 
                if (!l_ptr || !tipc_link_is_up(l_ptr) ||
                    (l_ptr->priority < highest_prio))
@@ -197,9 +197,9 @@ static void node_select_active_links(struct tipc_node *n_ptr)
  * tipc_node_link_down - handle loss of link
  */
 
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-       struct link **active;
+       struct tipc_link **active;
 
        n_ptr->working_links--;
 
@@ -239,14 +239,14 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
        return tipc_node_active_links(n_ptr);
 }
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
        atomic_inc(&tipc_num_links);
        n_ptr->link_cnt++;
 }
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        n_ptr->links[l_ptr->b_ptr->identity] = NULL;
        atomic_dec(&tipc_num_links);
@@ -307,7 +307,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
 
        if (n_ptr->bclink.supported) {
-               tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
+               tipc_bclink_add_node(n_ptr->addr);
                if (n_ptr->addr < tipc_own_addr)
                        tipc_own_tag++;
        }
@@ -350,9 +350,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                        n_ptr->bclink.defragm = NULL;
                }
 
-               tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
-               tipc_bclink_acknowledge(n_ptr,
-                                       mod(n_ptr->bclink.acked + 10000));
+               tipc_bclink_remove_node(n_ptr->addr);
+               tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
                if (n_ptr->addr < tipc_own_addr)
                        tipc_own_tag--;
 
@@ -361,7 +360,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
        /* Abort link changeover */
        for (i = 0; i < MAX_BEARERS; i++) {
-               struct link *l_ptr = n_ptr->links[i];
+               struct tipc_link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
index 4f15cb40aaa44491cfbbfca2a59d7f5f374599ee..0b1c5f8b6996a71383787e119e9890c832075939 100644 (file)
@@ -79,8 +79,8 @@ struct tipc_node {
        struct hlist_node hash;
        struct list_head list;
        struct list_head nsub;
-       struct link *active_links[2];
-       struct link *links[MAX_BEARERS];
+       struct tipc_link *active_links[2];
+       struct tipc_link *links[MAX_BEARERS];
        int link_cnt;
        int working_links;
        int block_setup;
@@ -117,10 +117,10 @@ extern u32 tipc_own_tag;
 struct tipc_node *tipc_node_find(u32 addr);
 struct tipc_node *tipc_node_create(u32 addr);
 void tipc_node_delete(struct tipc_node *n_ptr);
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_redundant_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
index 54d812a5a4d9dd272f330dcaa310c09d7499e29a..d91efc69e6f9b776868798b220e301f279e3eaf6 100644 (file)
@@ -80,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        struct tipc_msg *hdr;
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
-       struct port_list dports = {0, NULL, };
+       struct tipc_port_list dports = {0, NULL, };
        struct tipc_port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;
@@ -142,11 +142,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
  * If there is no port list, perform a lookup to create one
  */
 
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
 {
        struct tipc_msg *msg;
-       struct port_list dports = {0, NULL, };
-       struct port_list *item = dp;
+       struct tipc_port_list dports = {0, NULL, };
+       struct tipc_port_list *item = dp;
        int cnt = 0;
 
        msg = buf_msg(buf);
index b9aa34195aecd4d447daa1b46911ab83e887d6c3..f751807e2a913534246289610d5e3a07fd0a710d 100644 (file)
@@ -151,7 +151,7 @@ struct tipc_port {
 };
 
 extern spinlock_t tipc_port_list_lock;
-struct port_list;
+struct tipc_port_list;
 
 /*
  * TIPC port manipulation routines
@@ -228,7 +228,7 @@ int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
                              unsigned int total_len, int err);
 struct sk_buff *tipc_port_get_ports(void);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
 void tipc_port_reinit(void);
 
 /**
index 83116892528b448c0097ca3ae432e16995db8810..9e37b7812c3c361fa1a904b184ec98f74d4ce9e8 100644 (file)
@@ -110,8 +110,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 
        /* allocate table & mark all entries as uninitialized */
 
-       table = __vmalloc(actual_size * sizeof(struct reference),
-                         GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+       table = vzalloc(actual_size * sizeof(struct reference));
        if (table == NULL)
                return -ENOMEM;
 
index 42b8324ff2eef6e27834c9ba1d9efd994dd3ba75..e2f7c5d370ba622d8dd44ddb6fe68d3466330d2f 100644 (file)
@@ -185,9 +185,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 
        /* Validate arguments */
 
-       if (!net_eq(net, &init_net))
-               return -EAFNOSUPPORT;
-
        if (unlikely(protocol != 0))
                return -EPROTONOSUPPORT;
 
index 198371723b41816da5fe9116864f13d16faa188d..8c49566da8f3b2f4653b0a7be8bd47cdc2884eb6 100644 (file)
 #include "subscr.h"
 
 /**
- * struct subscriber - TIPC network topology subscriber
+ * struct tipc_subscriber - TIPC network topology subscriber
  * @port_ref: object reference to server port connecting to subscriber
  * @lock: pointer to spinlock controlling access to subscriber's server port
  * @subscriber_list: adjacent subscribers in top. server's list of subscribers
  * @subscription_list: list of subscription objects for this subscriber
  */
 
-struct subscriber {
+struct tipc_subscriber {
        u32 port_ref;
        spinlock_t *lock;
        struct list_head subscriber_list;
@@ -92,7 +92,7 @@ static u32 htohl(u32 in, int swap)
  *       try to take the lock if the message is rejected and returned!
  */
 
-static void subscr_send_event(struct subscription *sub,
+static void subscr_send_event(struct tipc_subscription *sub,
                              u32 found_lower,
                              u32 found_upper,
                              u32 event,
@@ -118,7 +118,7 @@ static void subscr_send_event(struct subscription *sub,
  * Returns 1 if there is overlap, otherwise 0.
  */
 
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
                        u32 found_lower,
                        u32 found_upper)
 
@@ -138,7 +138,7 @@ int tipc_subscr_overlap(struct subscription *sub,
  * Protected by nameseq.lock in name_table.c
  */
 
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
                                u32 found_lower,
                                u32 found_upper,
                                u32 event,
@@ -158,7 +158,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
  * subscr_timeout - subscription timeout has occurred
  */
 
-static void subscr_timeout(struct subscription *sub)
+static void subscr_timeout(struct tipc_subscription *sub)
 {
        struct tipc_port *server_port;
 
@@ -205,7 +205,7 @@ static void subscr_timeout(struct subscription *sub)
  * Called with subscriber port locked.
  */
 
-static void subscr_del(struct subscription *sub)
+static void subscr_del(struct tipc_subscription *sub)
 {
        tipc_nametbl_unsubscribe(sub);
        list_del(&sub->subscription_list);
@@ -224,11 +224,11 @@ static void subscr_del(struct subscription *sub)
  * simply wait for it to be released, then claim it.)
  */
 
-static void subscr_terminate(struct subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscriber *subscriber)
 {
        u32 port_ref;
-       struct subscription *sub;
-       struct subscription *sub_temp;
+       struct tipc_subscription *sub;
+       struct tipc_subscription *sub_temp;
 
        /* Invalidate subscriber reference */
 
@@ -278,10 +278,10 @@ static void subscr_terminate(struct subscriber *subscriber)
  */
 
 static void subscr_cancel(struct tipc_subscr *s,
-                         struct subscriber *subscriber)
+                         struct tipc_subscriber *subscriber)
 {
-       struct subscription *sub;
-       struct subscription *sub_temp;
+       struct tipc_subscription *sub;
+       struct tipc_subscription *sub_temp;
        int found = 0;
 
        /* Find first matching subscription, exit if not found */
@@ -314,10 +314,10 @@ static void subscr_cancel(struct tipc_subscr *s,
  * Called with subscriber port locked.
  */
 
-static struct subscription *subscr_subscribe(struct tipc_subscr *s,
-                                            struct subscriber *subscriber)
+static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+                                            struct tipc_subscriber *subscriber)
 {
-       struct subscription *sub;
+       struct tipc_subscription *sub;
        int swap;
 
        /* Determine subscriber's endianness */
@@ -393,7 +393,7 @@ static void subscr_conn_shutdown_event(void *usr_handle,
                                       unsigned int size,
                                       int reason)
 {
-       struct subscriber *subscriber = usr_handle;
+       struct tipc_subscriber *subscriber = usr_handle;
        spinlock_t *subscriber_lock;
 
        if (tipc_port_lock(port_ref) == NULL)
@@ -416,9 +416,9 @@ static void subscr_conn_msg_event(void *usr_handle,
                                  const unchar *data,
                                  u32 size)
 {
-       struct subscriber *subscriber = usr_handle;
+       struct tipc_subscriber *subscriber = usr_handle;
        spinlock_t *subscriber_lock;
-       struct subscription *sub;
+       struct tipc_subscription *sub;
 
        /*
         * Lock subscriber's server port (& make a local copy of lock pointer,
@@ -471,12 +471,12 @@ static void subscr_named_msg_event(void *usr_handle,
                                   struct tipc_portid const *orig,
                                   struct tipc_name_seq const *dest)
 {
-       struct subscriber *subscriber;
+       struct tipc_subscriber *subscriber;
        u32 server_port_ref;
 
        /* Create subscriber object */
 
-       subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
+       subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
        if (subscriber == NULL) {
                warn("Subscriber rejected, no memory\n");
                return;
@@ -568,8 +568,8 @@ failed:
 
 void tipc_subscr_stop(void)
 {
-       struct subscriber *subscriber;
-       struct subscriber *subscriber_temp;
+       struct tipc_subscriber *subscriber;
+       struct tipc_subscriber *subscriber_temp;
        spinlock_t *subscriber_lock;
 
        if (topsrv.setup_port) {
index 4b06ef6f8401fe9d84fca894735d74162a19645b..ef6529c8456f6049f590f47de98226f6faeab144 100644 (file)
 #ifndef _TIPC_SUBSCR_H
 #define _TIPC_SUBSCR_H
 
-struct subscription;
+struct tipc_subscription;
 
 /**
- * struct subscription - TIPC network topology subscription object
+ * struct tipc_subscription - TIPC network topology subscription object
  * @seq: name sequence associated with subscription
  * @timeout: duration of subscription (in ms)
  * @filter: event filtering to be done for subscription
@@ -52,7 +52,7 @@ struct subscription;
  * @evt: template for events generated by subscription
  */
 
-struct subscription {
+struct tipc_subscription {
        struct tipc_name_seq seq;
        u32 timeout;
        u32 filter;
@@ -64,11 +64,11 @@ struct subscription {
        struct tipc_event evt;
 };
 
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
                        u32 found_lower,
                        u32 found_upper);
 
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
                                u32 found_lower,
                                u32 found_upper,
                                u32 event,
index 5a69733bcdadbfcbbca874083a0e950cd27bb9f2..c2128b10e5f98fe3d4c409c1314247f67512ccfc 100644 (file)
@@ -19,3 +19,10 @@ config UNIX
 
          Say Y unless you know what you are doing.
 
+config UNIX_DIAG
+       tristate "UNIX: socket monitoring interface"
+       depends on UNIX
+       default UNIX
+       ---help---
+         Support for UNIX socket monitoring interface used by the ss tool.
+         If unsure, say Y.
index b852a2bde9a8f03b7a5f3615024b0ed02774f98c..b663c607b1c613e875501cec703a439e1ca0bce3 100644 (file)
@@ -6,3 +6,6 @@ obj-$(CONFIG_UNIX)      += unix.o
 
 unix-y                 := af_unix.o garbage.o
 unix-$(CONFIG_SYSCTL)  += sysctl_net_unix.o
+
+obj-$(CONFIG_UNIX_DIAG)        += unix_diag.o
+unix_diag-y            := diag.o
index 466fbcc5cf77a92ef491be50eb836f652165da16..7cc3d7b23d1c4366b0aa6f85fc6a69084abcfde6 100644 (file)
 #include <net/checksum.h>
 #include <linux/security.h>
 
-static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-static DEFINE_SPINLOCK(unix_table_lock);
+struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+EXPORT_SYMBOL_GPL(unix_socket_table);
+DEFINE_SPINLOCK(unix_table_lock);
+EXPORT_SYMBOL_GPL(unix_table_lock);
 static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound   (&unix_socket_table[UNIX_HASH_SIZE])
@@ -172,7 +174,7 @@ static inline int unix_recvq_full(struct sock const *sk)
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
-static struct sock *unix_peer_get(struct sock *s)
+struct sock *unix_peer_get(struct sock *s)
 {
        struct sock *peer;
 
@@ -183,6 +185,7 @@ static struct sock *unix_peer_get(struct sock *s)
        unix_state_unlock(s);
        return peer;
 }
+EXPORT_SYMBOL_GPL(unix_peer_get);
 
 static inline void unix_release_addr(struct unix_address *addr)
 {
@@ -1957,6 +1960,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
                            (UNIXCB(skb).cred != siocb->scm->cred)) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
                } else {
@@ -1974,6 +1978,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -1991,6 +1996,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        /* put the skb back if we didn't use it up.. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
 
@@ -2006,6 +2012,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        break;
                }
        } while (size);
@@ -2058,6 +2065,36 @@ static int unix_shutdown(struct socket *sock, int mode)
        return 0;
 }
 
+long unix_inq_len(struct sock *sk)
+{
+       struct sk_buff *skb;
+       long amount = 0;
+
+       if (sk->sk_state == TCP_LISTEN)
+               return -EINVAL;
+
+       spin_lock(&sk->sk_receive_queue.lock);
+       if (sk->sk_type == SOCK_STREAM ||
+           sk->sk_type == SOCK_SEQPACKET) {
+               skb_queue_walk(&sk->sk_receive_queue, skb)
+                       amount += skb->len;
+       } else {
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb)
+                       amount = skb->len;
+       }
+       spin_unlock(&sk->sk_receive_queue.lock);
+
+       return amount;
+}
+EXPORT_SYMBOL_GPL(unix_inq_len);
+
+long unix_outq_len(struct sock *sk)
+{
+       return sk_wmem_alloc_get(sk);
+}
+EXPORT_SYMBOL_GPL(unix_outq_len);
+
 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
        struct sock *sk = sock->sk;
@@ -2066,33 +2103,16 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case SIOCOUTQ:
-               amount = sk_wmem_alloc_get(sk);
+               amount = unix_outq_len(sk);
                err = put_user(amount, (int __user *)arg);
                break;
        case SIOCINQ:
-               {
-                       struct sk_buff *skb;
-
-                       if (sk->sk_state == TCP_LISTEN) {
-                               err = -EINVAL;
-                               break;
-                       }
-
-                       spin_lock(&sk->sk_receive_queue.lock);
-                       if (sk->sk_type == SOCK_STREAM ||
-                           sk->sk_type == SOCK_SEQPACKET) {
-                               skb_queue_walk(&sk->sk_receive_queue, skb)
-                                       amount += skb->len;
-                       } else {
-                               skb = skb_peek(&sk->sk_receive_queue);
-                               if (skb)
-                                       amount = skb->len;
-                       }
-                       spin_unlock(&sk->sk_receive_queue.lock);
+               amount = unix_inq_len(sk);
+               if (amount < 0)
+                       err = amount;
+               else
                        err = put_user(amount, (int __user *)arg);
-                       break;
-               }
-
+               break;
        default:
                err = -ENOIOCTLCMD;
                break;
diff --git a/net/unix/diag.c b/net/unix/diag.c
new file mode 100644 (file)
index 0000000..6b7697f
--- /dev/null
@@ -0,0 +1,329 @@
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/af_unix.h>
+#include <net/tcp_states.h>
+
+#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
+       RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
+
+static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct unix_address *addr = unix_sk(sk)->addr;
+       char *s;
+
+       if (addr) {
+               s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
+               memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
+       }
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct dentry *dentry = unix_sk(sk)->dentry;
+       struct unix_diag_vfs *uv;
+
+       if (dentry) {
+               uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
+               uv->udiag_vfs_ino = dentry->d_inode->i_ino;
+               uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+       }
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct sock *peer;
+       int ino;
+
+       peer = unix_peer_get(sk);
+       if (peer) {
+               unix_state_lock(peer);
+               ino = sock_i_ino(peer);
+               unix_state_unlock(peer);
+               sock_put(peer);
+
+               RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+       }
+
+       return 0;
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct sk_buff *skb;
+       u32 *buf;
+       int i;
+
+       if (sk->sk_state == TCP_LISTEN) {
+               spin_lock(&sk->sk_receive_queue.lock);
+               buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
+                               sk->sk_receive_queue.qlen * sizeof(u32));
+               i = 0;
+               skb_queue_walk(&sk->sk_receive_queue, skb) {
+                       struct sock *req, *peer;
+
+                       req = skb->sk;
+                       /*
+                        * The state lock is outer for the same sk's
+                        * queue lock. With the other's queue locked it's
+                        * OK to lock the state.
+                        */
+                       unix_state_lock_nested(req);
+                       peer = unix_sk(req)->peer;
+                       buf[i++] = (peer ? sock_i_ino(peer) : 0);
+                       unix_state_unlock(req);
+               }
+               spin_unlock(&sk->sk_receive_queue.lock);
+       }
+
+       return 0;
+
+rtattr_failure:
+       spin_unlock(&sk->sk_receive_queue.lock);
+       return -EMSGSIZE;
+}
+
+static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+{
+       struct unix_diag_rqlen *rql;
+
+       rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+
+       if (sk->sk_state == TCP_LISTEN) {
+               rql->udiag_rqueue = sk->sk_receive_queue.qlen;
+               rql->udiag_wqueue = sk->sk_max_ack_backlog;
+       } else {
+               rql->udiag_rqueue = (__u32)unix_inq_len(sk);
+               rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+       }
+
+       return 0;
+
+rtattr_failure:
+       return -EMSGSIZE;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+               u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct nlmsghdr *nlh;
+       struct unix_diag_msg *rep;
+
+       nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
+       nlh->nlmsg_flags = flags;
+
+       rep = NLMSG_DATA(nlh);
+
+       rep->udiag_family = AF_UNIX;
+       rep->udiag_type = sk->sk_type;
+       rep->udiag_state = sk->sk_state;
+       rep->udiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rep->udiag_cookie);
+
+       if ((req->udiag_show & UDIAG_SHOW_NAME) &&
+           sk_diag_dump_name(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_VFS) &&
+           sk_diag_dump_vfs(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_PEER) &&
+           sk_diag_dump_peer(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
+           sk_diag_dump_icons(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
+           sk_diag_show_rqlen(sk, skb))
+               goto nlmsg_failure;
+
+       if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
+           sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+               goto nlmsg_failure;
+
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+       return skb->len;
+
+nlmsg_failure:
+       nlmsg_trim(skb, b);
+       return -EMSGSIZE;
+}
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+               u32 pid, u32 seq, u32 flags)
+{
+       int sk_ino;
+
+       unix_state_lock(sk);
+       sk_ino = sock_i_ino(sk);
+       unix_state_unlock(sk);
+
+       if (!sk_ino)
+               return 0;
+
+       return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+}
+
+static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct unix_diag_req *req;
+       int num, s_num, slot, s_slot;
+
+       req = NLMSG_DATA(cb->nlh);
+
+       s_slot = cb->args[0];
+       num = s_num = cb->args[1];
+
+       spin_lock(&unix_table_lock);
+       for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+               struct sock *sk;
+               struct hlist_node *node;
+
+               num = 0;
+               sk_for_each(sk, node, &unix_socket_table[slot]) {
+                       if (num < s_num)
+                               goto next;
+                       if (!(req->udiag_states & (1 << sk->sk_state)))
+                               goto next;
+                       if (sk_diag_dump(sk, skb, req,
+                                        NETLINK_CB(cb->skb).pid,
+                                        cb->nlh->nlmsg_seq,
+                                        NLM_F_MULTI) < 0)
+                               goto done;
+next:
+                       num++;
+               }
+       }
+done:
+       spin_unlock(&unix_table_lock);
+       cb->args[0] = slot;
+       cb->args[1] = num;
+
+       return skb->len;
+}
+
+static struct sock *unix_lookup_by_ino(int ino)
+{
+       int i;
+       struct sock *sk;
+
+       spin_lock(&unix_table_lock);
+       for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+               struct hlist_node *node;
+
+               sk_for_each(sk, node, &unix_socket_table[i])
+                       if (ino == sock_i_ino(sk)) {
+                               sock_hold(sk);
+                               spin_unlock(&unix_table_lock);
+
+                               return sk;
+                       }
+       }
+
+       spin_unlock(&unix_table_lock);
+       return NULL;
+}
+
+static int unix_diag_get_exact(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh,
+                              struct unix_diag_req *req)
+{
+       int err = -EINVAL;
+       struct sock *sk;
+       struct sk_buff *rep;
+       unsigned int extra_len;
+
+       if (req->udiag_ino == 0)
+               goto out_nosk;
+
+       sk = unix_lookup_by_ino(req->udiag_ino);
+       err = -ENOENT;
+       if (sk == NULL)
+               goto out_nosk;
+
+       err = sock_diag_check_cookie(sk, req->udiag_cookie);
+       if (err)
+               goto out;
+
+       extra_len = 256;
+again:
+       err = -ENOMEM;
+       rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
+                       GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+                          nlh->nlmsg_seq, 0, req->udiag_ino);
+       if (err < 0) {
+               kfree_skb(rep);
+               extra_len += 256;
+               if (extra_len >= PAGE_SIZE)
+                       goto out;
+
+               goto again;
+       }
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       if (sk)
+               sock_put(sk);
+out_nosk:
+       return err;
+}
+
+static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       int hdrlen = sizeof(struct unix_diag_req);
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP)
+               return netlink_dump_start(sock_diag_nlsk, skb, h,
+                                         unix_diag_dump, NULL, 0);
+       else
+               return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+}
+
+static struct sock_diag_handler unix_diag_handler = {
+       .family = AF_UNIX,
+       .dump = unix_diag_handler_dump,
+};
+
+static int __init unix_diag_init(void)
+{
+       return sock_diag_register(&unix_diag_handler);
+}
+
+static void __exit unix_diag_exit(void)
+{
+       sock_diag_unregister(&unix_diag_handler);
+}
+
+module_init(unix_diag_init);
+module_exit(unix_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
index 3e16c6abde4f4bea8a800dbecfd2d98068d37cf8..a306bc66000e3ddece36a4743c35523f606cfee5 100644 (file)
@@ -232,7 +232,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
                return NOTIFY_DONE;
 
        if (dev->type == ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
         || dev->type == ARPHRD_ETHER
 #endif
         ) {
index fa2b41888bd9a78b931ff6a6be418de56498fe6b..f0ce862d1f46309b5482e7594be438e6744e2566 100644 (file)
@@ -161,7 +161,7 @@ void x25_establish_link(struct x25_neigh *nb)
                *ptr = X25_IFACE_CONNECT;
                break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        case ARPHRD_ETHER:
                return;
 #endif
@@ -180,7 +180,7 @@ void x25_terminate_link(struct x25_neigh *nb)
        struct sk_buff *skb;
        unsigned char *ptr;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        if (nb->dev->type == ARPHRD_ETHER)
                return;
 #endif
@@ -213,7 +213,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
                *dptr = X25_IFACE_DATA;
                break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
        case ARPHRD_ETHER:
                kfree_skb(skb);
                return;
index 97d77c532d8c95550711c915cb2b02d65d99828f..cf6366270054f10750b924543aa1c8df38a5027f 100644 (file)
@@ -134,7 +134,7 @@ struct net_device *x25_dev_get(char *devname)
 
        if (dev &&
            (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
                                        && dev->type != ARPHRD_ETHER
 #endif
                                        ))){
index 552df27dcf53d3388fb2eb9ab3ba7a31c7ffcd64..7661576b6f455ae0effe6555b913385e30faa886 100644 (file)
@@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
        const struct flowi4 *fl4 = &fl->u.ip4;
 
-       return  addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
-               addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+       return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+               addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
                (fl4->flowi4_proto == sel->proto || !sel->proto) &&
@@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        case AF_INET:
                dst_ops = &net->xfrm.xfrm4_dst_ops;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                dst_ops = &net->xfrm.xfrm6_dst_ops;
                break;
@@ -1499,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                goto free_dst;
 
        /* Copy neighbour for reachability confirmation */
-       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
+       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
 
        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2382,9 +2392,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
        return dst_metric_advmss(dst->path);
 }
 
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
-       return dst_mtu(dst->path);
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst_mtu(dst->path);
 }
 
 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2423,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->default_advmss == NULL))
                        dst_ops->default_advmss = xfrm_default_advmss;
-               if (likely(dst_ops->default_mtu == NULL))
-                       dst_ops->default_mtu = xfrm_default_mtu;
+               if (likely(dst_ops->mtu == NULL))
+                       dst_ops->mtu = xfrm_mtu;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
@@ -2420,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2433,7 +2445,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                case AF_INET:
                        xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
                        break;
@@ -2483,7 +2495,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
        afinfo = xfrm_policy_afinfo[AF_INET];
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        afinfo = xfrm_policy_afinfo[AF_INET6];
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
@@ -2514,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
index 9414b9c5b1e4284b9ed90e9fe3ff39cdfc07e2ce..5b228f97d4b3308a950ae857828e04e90df86848 100644 (file)
@@ -1035,16 +1035,12 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
                        break;
 
                case AF_INET6:
-                       ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-                                      (const struct in6_addr *)daddr);
-                       ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-                                      (const struct in6_addr *)saddr);
+                       *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
+                       *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
-                       ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-                                      (const struct in6_addr *)saddr);
-                       ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-                                      (const struct in6_addr *)daddr);
+                       *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
+                       *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
                        break;
                }
 
index d0a42df5160ef7abb50a5554f80c40839ed1ad86..e0d747a2e80341eb736c4348212770c3db41c4ad 100644 (file)
@@ -28,7 +28,7 @@
 #include <net/netlink.h>
 #include <net/ah.h>
 #include <asm/uaccess.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
 
@@ -150,7 +150,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -201,7 +201,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                        goto out;
                break;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                if (attrs[XFRMA_ALG_COMP]       ||
@@ -1160,7 +1160,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1231,7 +1231,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                switch (ut[i].family) {
                case AF_INET:
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        break;
 #endif
@@ -2604,7 +2604,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_XFRM_POLICY) {
                        *dir = -EOPNOTSUPP;
index ba573fe7c74d5bfe0495372931ebff69406f35c2..914833d99b06f78242fa12584c568ecef6a5e65a 100644 (file)
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --directory=$(srctree) --directory=$(objtree)           \
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-       $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+           $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
                $(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
-       $(Q)rm -f $(srctree)/arch/um/Kconfig
        $(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
index 36cc0cc39e78e135630384e6500f4a789fe7aa2a..b566eba4a65cc16128b34152154c3ba26505b692 100644 (file)
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot?*/
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
 
index 5dd5b140242cd8872b255c9e88878ab56155d07c..8738deff26fadde6a6bf200972d0422193e4fdc9 100644 (file)
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
 
 struct crypto_shash *hmac_tfm;
 
+static DEFINE_MUTEX(mutex);
+
 static struct shash_desc *init_desc(void)
 {
        int rc;
        struct shash_desc *desc;
 
        if (hmac_tfm == NULL) {
+               mutex_lock(&mutex);
+               if (hmac_tfm)
+                       goto out;
                hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(hmac_tfm)) {
                        pr_err("Can not allocate %s (reason: %ld)\n",
                               evm_hmac, PTR_ERR(hmac_tfm));
                        rc = PTR_ERR(hmac_tfm);
                        hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
+                       return ERR_PTR(rc);
+               }
+               rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+               if (rc) {
+                       crypto_free_shash(hmac_tfm);
+                       hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
                        return ERR_PTR(rc);
                }
+out:
+               mutex_unlock(&mutex);
        }
 
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
        desc->tfm = hmac_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
-       if (rc)
-               goto out;
        rc = crypto_shash_init(desc);
-out:
        if (rc) {
                kfree(desc);
                return ERR_PTR(rc);
index 6bc7a86d1027cbf5f78b67804448f577e869d093..d6f8433250a5a37db4543b61c1b73303f92be574 100644 (file)
@@ -2,5 +2,9 @@
 # Makefile for encrypted keys
 #
 
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o
-obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
+
+encrypted-keys-y := encrypted.o ecryptfs_format.o
+masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
+masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
+encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
index dcc843cb0f80de0f899ada23d8bf80d25e023e5e..41144f71d6154f612570f8f2c5243269becf68a4 100644 (file)
@@ -444,7 +444,7 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
                goto out;
 
        if (IS_ERR(mkey)) {
-               int ret = PTR_ERR(epayload);
+               int ret = PTR_ERR(mkey);
 
                if (ret == -ENOTSUPP)
                        pr_info("encrypted_key: key %s not supported",
index b6ade8945250c0b3f13913c56ee5b9686a3f39d1..8136a2d44c63ddb3a76f30f35b6780ff19b74ae9 100644 (file)
@@ -2,7 +2,8 @@
 #define __ENCRYPTED_KEY_H
 
 #define ENCRYPTED_DEBUG 0
-#ifdef CONFIG_TRUSTED_KEYS
+#if defined(CONFIG_TRUSTED_KEYS) || \
+  (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
 extern struct key *request_trusted_key(const char *trusted_desc,
                                       u8 **master_key, size_t *master_keylen);
 #else
index 5b366d7af3c4dc17b595c67af3dbe99d5fd69df1..69ff52c08e97bb0eab715b1ea61c709e4f80faad 100644 (file)
@@ -102,7 +102,8 @@ int user_update(struct key *key, const void *data, size_t datalen)
                key->expiry = 0;
        }
 
-       kfree_rcu(zap, rcu);
+       if (zap)
+               kfree_rcu(zap, rcu);
 
 error:
        return ret;
index 893af8a2fa1e994c518b7649ddc9d29b61c8681a..7bd6f138236b3a010d457ab473ea73fd02641c8c 100644 (file)
@@ -114,19 +114,20 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
        int offset, ret = 0;
        struct ipv6hdr *ip6;
        u8 nexthdr;
+       __be16 frag_off;
 
        ip6 = ipv6_hdr(skb);
        if (ip6 == NULL)
                return -EINVAL;
-       ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-       ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+       ad->u.net.v6info.saddr = ip6->saddr;
+       ad->u.net.v6info.daddr = ip6->daddr;
        ret = 0;
        /* IPv6 can have several extension header before the Transport header
         * skip them */
        offset = skb_network_offset(skb);
        offset += sizeof(*ip6);
        nexthdr = ip6->nexthdr;
-       offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+       offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
        if (offset < 0)
                return 0;
        if (proto)
index 0c6cc69c8f86d68fb24f1b954c64370e8da88c4a..e2f684aeb70c152a61038c0d2d19525774144d59 100644 (file)
@@ -381,7 +381,7 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
                                     void **value, size_t *len)
 {
        if (unlikely(IS_PRIVATE(inode)))
-               return 0;
+               return -EOPNOTSUPP;
        return security_ops->inode_init_security(inode, dir, qstr, name, value,
                                                 len);
 }
index 1126c10a5e821d1d835aa0aa7d83dfba5a04028f..86305c2f555a383bec19ea53af81e20dae99b42d 100644 (file)
@@ -1090,7 +1090,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
                        return SECCLASS_NETLINK_ROUTE_SOCKET;
                case NETLINK_FIREWALL:
                        return SECCLASS_NETLINK_FIREWALL_SOCKET;
-               case NETLINK_INET_DIAG:
+               case NETLINK_SOCK_DIAG:
                        return SECCLASS_NETLINK_TCPDIAG_SOCKET;
                case NETLINK_NFLOG:
                        return SECCLASS_NETLINK_NFLOG_SOCKET;
@@ -3561,19 +3561,20 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
        u8 nexthdr;
        int ret = -EINVAL, offset;
        struct ipv6hdr _ipv6h, *ip6;
+       __be16 frag_off;
 
        offset = skb_network_offset(skb);
        ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (ip6 == NULL)
                goto out;
 
-       ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-       ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+       ad->u.net.v6info.saddr = ip6->saddr;
+       ad->u.net.v6info.daddr = ip6->daddr;
        ret = 0;
 
        nexthdr = ip6->nexthdr;
        offset += sizeof(_ipv6h);
-       offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+       offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
        if (offset < 0)
                goto out;
 
@@ -3871,7 +3872,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                if (family == PF_INET)
                        ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
                else
-                       ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
+                       ad.u.net.v6info.saddr = addr6->sin6_addr;
 
                err = avc_has_perm(sksec->sid, sid,
                                   sksec->sclass, node_perm, &ad);
index 3bf46abaa688cbb7735f1d47cf3d33348f9b019a..86365857c0887b8bd4551c0dc7667c0fba9bf9e2 100644 (file)
@@ -220,7 +220,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
        case PF_INET6:
                ret = security_node_sid(PF_INET6,
                                        addr, sizeof(struct in6_addr), sid);
-               ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
+               new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
                break;
        default:
                BUG();
index 0b62bd112461c712cfc9425f9e8ac0f6caf194f0..7b9eb1faf68b5fe435b7f70e36ff21330934d6a2 100644 (file)
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       rcu_dereference_protected(
+                               sel_netport_hash[idx].list.prev,
+                               lockdep_is_held(&sel_netport_lock)),
                        struct sel_netport, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
index 6aceef518a41fe7cd095bf8f860afe210abeb6ce..5c32f36ff70618dfb08e3040c94060238dfa44da 100644 (file)
@@ -102,9 +102,6 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
 
 const char *smack_cipso_option = SMACK_CIPSO_OPTION;
 
-
-#define        SEQ_READ_FINISHED       ((loff_t)-1)
-
 /*
  * Values for parsing cipso rules
  * SMK_DIGITLEN: Length of a digit field in a rule.
@@ -357,10 +354,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
 
        rc = count;
        /*
+        * If this is "load" as opposed to "load-self" and a new rule
+        * it needs to get added for reporting.
         * smk_set_access returns true if there was already a rule
         * for the subject/object pair, and false if it was new.
         */
-       if (!smk_set_access(rule, rule_list, rule_lock)) {
+       if (load && !smk_set_access(rule, rule_list, rule_lock)) {
                smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
                if (smlp != NULL) {
                        smlp->smk_rule = rule;
@@ -377,12 +376,12 @@ out:
        return rc;
 }
 
-
 /*
- * Seq_file read operations for /smack/load
+ * Core logic for smackfs seq list operations.
  */
 
-static void *load_seq_start(struct seq_file *s, loff_t *pos)
+static void *smk_seq_start(struct seq_file *s, loff_t *pos,
+                               struct list_head *head)
 {
        struct list_head *list;
 
@@ -390,7 +389,7 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
         * This is 0 the first time through.
         */
        if (s->index == 0)
-               s->private = &smack_rule_list;
+               s->private = head;
 
        if (s->private == NULL)
                return NULL;
@@ -404,11 +403,12 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
        return list;
 }
 
-static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
+                               struct list_head *head)
 {
        struct list_head *list = v;
 
-       if (list_is_last(list, &smack_rule_list)) {
+       if (list_is_last(list, head)) {
                s->private = NULL;
                return NULL;
        }
@@ -416,6 +416,25 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
        return list->next;
 }
 
+static void smk_seq_stop(struct seq_file *s, void *v)
+{
+       /* No-op */
+}
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load_seq_start(struct seq_file *s, loff_t *pos)
+{
+       return smk_seq_start(s, pos, &smack_rule_list);
+}
+
+static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       return smk_seq_next(s, v, pos, &smack_rule_list);
+}
+
 static int load_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
@@ -446,16 +465,11 @@ static int load_seq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-static void load_seq_stop(struct seq_file *s, void *v)
-{
-       /* No-op */
-}
-
 static const struct seq_operations load_seq_ops = {
        .start = load_seq_start,
        .next  = load_seq_next,
        .show  = load_seq_show,
-       .stop  = load_seq_stop,
+       .stop  = smk_seq_stop,
 };
 
 /**
@@ -574,28 +588,12 @@ static void smk_unlbl_ambient(char *oldambient)
 
 static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
 {
-       if (*pos == SEQ_READ_FINISHED)
-               return NULL;
-       if (list_empty(&smack_known_list))
-               return NULL;
-
-       return smack_known_list.next;
+       return smk_seq_start(s, pos, &smack_known_list);
 }
 
 static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct list_head  *list = v;
-
-       /*
-        * labels with no associated cipso value wont be printed
-        * in cipso_seq_show
-        */
-       if (list_is_last(list, &smack_known_list)) {
-               *pos = SEQ_READ_FINISHED;
-               return NULL;
-       }
-
-       return list->next;
+       return smk_seq_next(s, v, pos, &smack_known_list);
 }
 
 /*
@@ -634,16 +632,11 @@ static int cipso_seq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-static void cipso_seq_stop(struct seq_file *s, void *v)
-{
-       /* No-op */
-}
-
 static const struct seq_operations cipso_seq_ops = {
        .start = cipso_seq_start,
-       .stop  = cipso_seq_stop,
        .next  = cipso_seq_next,
        .show  = cipso_seq_show,
+       .stop  = smk_seq_stop,
 };
 
 /**
@@ -788,23 +781,12 @@ static const struct file_operations smk_cipso_ops = {
 
 static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos)
 {
-       if (*pos == SEQ_READ_FINISHED)
-               return NULL;
-       if (list_empty(&smk_netlbladdr_list))
-               return NULL;
-       return smk_netlbladdr_list.next;
+       return smk_seq_start(s, pos, &smk_netlbladdr_list);
 }
 
 static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct list_head *list = v;
-
-       if (list_is_last(list, &smk_netlbladdr_list)) {
-               *pos = SEQ_READ_FINISHED;
-               return NULL;
-       }
-
-       return list->next;
+       return smk_seq_next(s, v, pos, &smk_netlbladdr_list);
 }
 #define BEBITS (sizeof(__be32) * 8)
 
@@ -828,16 +810,11 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-static void netlbladdr_seq_stop(struct seq_file *s, void *v)
-{
-       /* No-op */
-}
-
 static const struct seq_operations netlbladdr_seq_ops = {
        .start = netlbladdr_seq_start,
-       .stop  = netlbladdr_seq_stop,
        .next  = netlbladdr_seq_next,
        .show  = netlbladdr_seq_show,
+       .stop  = smk_seq_stop,
 };
 
 /**
@@ -1405,23 +1382,14 @@ static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
 {
        struct task_smack *tsp = current_security();
 
-       if (*pos == SEQ_READ_FINISHED)
-               return NULL;
-       if (list_empty(&tsp->smk_rules))
-               return NULL;
-       return tsp->smk_rules.next;
+       return smk_seq_start(s, pos, &tsp->smk_rules);
 }
 
 static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
        struct task_smack *tsp = current_security();
-       struct list_head *list = v;
 
-       if (list_is_last(list, &tsp->smk_rules)) {
-               *pos = SEQ_READ_FINISHED;
-               return NULL;
-       }
-       return list->next;
+       return smk_seq_next(s, v, pos, &tsp->smk_rules);
 }
 
 static int load_self_seq_show(struct seq_file *s, void *v)
@@ -1453,16 +1421,11 @@ static int load_self_seq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-static void load_self_seq_stop(struct seq_file *s, void *v)
-{
-       /* No-op */
-}
-
 static const struct seq_operations load_self_seq_ops = {
        .start = load_self_seq_start,
        .next  = load_self_seq_next,
        .show  = load_self_seq_show,
-       .stop  = load_self_seq_stop,
+       .stop  = smk_seq_stop,
 };
 
 
index 738bbdf8d4c77ceba3ba3abfe733a90dc56cd4fb..d9f3ced8756ec4dc87492b8edecf172375843f5d 100644 (file)
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
 {
        char *pos = ERR_PTR(-ENOMEM);
        if (buflen >= 256) {
-               struct path ns_root = { };
                /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buffer, buflen - 1);
+               pos = d_absolute_path(path, buffer, buflen - 1);
                if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
                        struct inode *inode = path->dentry->d_inode;
                        if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
-               else
+               else {
                        pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+                       /*
+                        * Fall back to local name if absolute name is not
+                        * available.
+                        */
+                       if (pos == ERR_PTR(-EINVAL))
+                               pos = tomoyo_get_local_path(path->dentry, buf,
+                                                           buf_len - 1);
+               }
 encode:
                if (IS_ERR(pos))
                        continue;
index 6e5addeb236b49595ea563e7525d5a262a32e295..73516f69ac7ca8a33244cb300df8958ac2d77e20 100644 (file)
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
index e083122ca55af550f6d4b72e6d5cea5c8c2e1a05..dbf94b189e7576b6e8a3194c5612b0397e6060ee 100644 (file)
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
                struct cs5535audio_dma_desc *desc =
                        &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
                desc->addr = cpu_to_le32(addr);
-               desc->size = cpu_to_le32(period_bytes);
+               desc->size = cpu_to_le16(period_bytes);
                desc->ctlreserved = cpu_to_le16(PRD_EOP);
                desc_addr += sizeof(struct cs5535audio_dma_desc);
                addr += period_bytes;
index e44b107fdc7594c47df5458b7acc0c6440454561..4562e9de6a1ab0dc7015e28d7be14c2eafa5a6b3 100644 (file)
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
 
        /* Search for codec ID */
        for (q = tbl; q->subvendor; q++) {
-               unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
-
-               if (vendorid == codec->subsystem_id)
+               unsigned int mask = 0xffff0000 | q->subdevice_mask;
+               unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
+               if ((codec->subsystem_id & mask) == id)
                        break;
        }
 
index 1c8ddf547a2dde5b1426fdfb6d26261ea35e6872..c1da422e085a5230d8b7eb837925772b11ea4328 100644 (file)
@@ -297,10 +297,18 @@ static int hdmi_update_eld(struct hdmi_eld *e,
                                        buf + ELD_FIXED_BYTES + mnl + 3 * i);
        }
 
+       /*
+        * HDMI sink's ELD info cannot always be retrieved for now, e.g.
+        * in console or for audio devices. Assume the highest speakers
+        * configuration, to _not_ prohibit multi-channel audio playback.
+        */
+       if (!e->spk_alloc)
+               e->spk_alloc = 0xffff;
+
+       e->eld_valid = true;
        return 0;
 
 out_fail:
-       e->eld_ver = 0;
        return -EINVAL;
 }
 
@@ -323,9 +331,6 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
         * ELD is valid, actual eld_size is assigned in hdmi_update_eld()
         */
 
-       if (!eld->eld_valid)
-               return -ENOENT;
-
        size = snd_hdmi_get_eld_size(codec, nid);
        if (size == 0) {
                /* wfg: workaround for ASUS P5E-VM HDMI board */
@@ -342,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
 
        for (i = 0; i < size; i++) {
                unsigned int val = hdmi_get_eld_data(codec, nid, i);
+               /*
+                * Graphics driver might be writing to ELD buffer right now.
+                * Just abort. The caller will repoll after a while.
+                */
                if (!(val & AC_ELDD_ELD_VALID)) {
-                       if (!i) {
-                               snd_printd(KERN_INFO
-                                          "HDMI: invalid ELD data\n");
-                               ret = -EINVAL;
-                               goto error;
-                       }
                        snd_printd(KERN_INFO
                                  "HDMI: invalid ELD data byte %d\n", i);
-                       val = 0;
-               } else
-                       val &= AC_ELDD_ELD_DATA;
+                       ret = -EINVAL;
+                       goto error;
+               }
+               val &= AC_ELDD_ELD_DATA;
+               /*
+                * The first byte cannot be zero. This can happen on some DVI
+                * connections. Some Intel chips may also need some 250ms delay
+                * to return non-zero ELD data, even when the graphics driver
+                * correctly writes ELD content before setting ELD_valid bit.
+                */
+               if (!val && !i) {
+                       snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
+                       ret = -EINVAL;
+                       goto error;
+               }
                buf[i] = val;
        }
 
index 096507d2ca9a7323c8d8e674ff4da7d921e677e4..c2f79e63124d82cb32ee0a47668f8997bc73e298 100644 (file)
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE},
+         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+       /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
          AZX_DCAPS_BUFSIZE },  /* ICH6 */
index 6579e0f2bb57d6b6c99814cb06b9f1c194eedb66..618ddad172369b22062ad97114dd783841ccde43 100644 (file)
@@ -653,6 +653,9 @@ struct hdmi_eld {
        int     spk_alloc;
        int     sad_count;
        struct cea_sad sad[ELD_MAX_SAD];
+       /*
+        * all fields above eld_buffer will be cleared before updating ELD
+        */
        char    eld_buffer[ELD_MAX_SIZE];
 #ifdef CONFIG_PROC_FS
        struct snd_info_entry *proc_entry;
index 2a2d8645ba0933e8e9e20ef3bd33a656f1bb69c5..70a7abda7e225744bf93d90f795c86bfcd95dac6 100644 (file)
@@ -58,6 +58,8 @@ struct cs_spec {
        unsigned int gpio_mask;
        unsigned int gpio_dir;
        unsigned int gpio_data;
+       unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
+       unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
 
        struct hda_pcm pcm_rec[2];      /* PCM information */
 
@@ -76,6 +78,7 @@ enum {
        CS420X_MBP53,
        CS420X_MBP55,
        CS420X_IMAC27,
+       CS420X_APPLE,
        CS420X_AUTO,
        CS420X_MODELS
 };
@@ -237,6 +240,15 @@ static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
        return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
 }
 
+static void cs_update_input_select(struct hda_codec *codec)
+{
+       struct cs_spec *spec = codec->spec;
+       if (spec->cur_adc)
+               snd_hda_codec_write(codec, spec->cur_adc, 0,
+                                   AC_VERB_SET_CONNECT_SEL,
+                                   spec->adc_idx[spec->cur_input]);
+}
+
 /*
  * Analog capture
  */
@@ -250,6 +262,7 @@ static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
        spec->cur_adc = spec->adc_nid[spec->cur_input];
        spec->cur_adc_stream_tag = stream_tag;
        spec->cur_adc_format = format;
+       cs_update_input_select(codec);
        snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
        return 0;
 }
@@ -689,10 +702,8 @@ static int change_cur_input(struct hda_codec *codec, unsigned int idx,
                                           spec->cur_adc_stream_tag, 0,
                                           spec->cur_adc_format);
        }
-       snd_hda_codec_write(codec, spec->cur_adc, 0,
-                           AC_VERB_SET_CONNECT_SEL,
-                           spec->adc_idx[idx]);
        spec->cur_input = idx;
+       cs_update_input_select(codec);
        return 1;
 }
 
@@ -920,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
                                        spdif_present ? 0 : PIN_OUT);
                }
        }
-       if (spec->board_config == CS420X_MBP53 ||
-           spec->board_config == CS420X_MBP55 ||
-           spec->board_config == CS420X_IMAC27) {
-               unsigned int gpio = hp_present ? 0x02 : 0x08;
+       if (spec->gpio_eapd_hp) {
+               unsigned int gpio = hp_present ?
+                       spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
                snd_hda_codec_write(codec, 0x01, 0,
                                    AC_VERB_SET_GPIO_DATA, gpio);
        }
@@ -973,10 +983,7 @@ static void cs_automic(struct hda_codec *codec)
                } else  {
                        spec->cur_input = spec->last_input;
                }
-
-               snd_hda_codec_write_cache(codec, spec->cur_adc, 0,
-                                       AC_VERB_SET_CONNECT_SEL,
-                                       spec->adc_idx[spec->cur_input]);
+               cs_update_input_select(codec);
        } else {
                if (present)
                        change_cur_input(codec, spec->automic_idx, 0);
@@ -1073,9 +1080,7 @@ static void init_input(struct hda_codec *codec)
                        cs_automic(codec);
                else  {
                        spec->cur_adc = spec->adc_nid[spec->cur_input];
-                       snd_hda_codec_write(codec, spec->cur_adc, 0,
-                                       AC_VERB_SET_CONNECT_SEL,
-                                       spec->adc_idx[spec->cur_input]);
+                       cs_update_input_select(codec);
                }
        } else {
                change_cur_input(codec, spec->cur_input, 1);
@@ -1273,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
        [CS420X_MBP53] = "mbp53",
        [CS420X_MBP55] = "mbp55",
        [CS420X_IMAC27] = "imac27",
+       [CS420X_APPLE] = "apple",
        [CS420X_AUTO] = "auto",
 };
 
@@ -1282,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
        SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
-       SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
+       /* this conflicts with too many other models */
+       /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+       {} /* terminator */
+};
+
+static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
+       SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
        {} /* terminator */
 };
 
@@ -1364,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
        spec->board_config =
                snd_hda_check_board_config(codec, CS420X_MODELS,
                                           cs420x_models, cs420x_cfg_tbl);
+       if (spec->board_config < 0)
+               spec->board_config =
+                       snd_hda_check_board_codec_sid_config(codec,
+                               CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
        if (spec->board_config >= 0)
                fix_pincfg(codec, spec->board_config, cs_pincfgs);
 
@@ -1371,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
        case CS420X_IMAC27:
        case CS420X_MBP53:
        case CS420X_MBP55:
-               /* GPIO1 = headphones */
-               /* GPIO3 = speakers */
-               spec->gpio_mask = 0x0a;
-               spec->gpio_dir = 0x0a;
+       case CS420X_APPLE:
+               spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
+               spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
+               spec->gpio_mask = spec->gpio_dir =
+                       spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
                break;
        }
 
index 81b7b791b3c36a09adef820b4ef4e5e837577121..c505fd5d338cc91d1de2199143c8141f24890355 100644 (file)
@@ -65,7 +65,11 @@ struct hdmi_spec_per_pin {
        hda_nid_t pin_nid;
        int num_mux_nids;
        hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
+
+       struct hda_codec *codec;
        struct hdmi_eld sink_eld;
+       struct delayed_work work;
+       int repoll_count;
 };
 
 struct hdmi_spec {
@@ -745,8 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
  * Unsolicited events
  */
 
-static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
-                              struct hdmi_eld *eld);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 {
@@ -755,7 +758,6 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
        int pd = !!(res & AC_UNSOL_RES_PD);
        int eldv = !!(res & AC_UNSOL_RES_ELDV);
        int pin_idx;
-       struct hdmi_eld *eld;
 
        printk(KERN_INFO
                "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
@@ -764,17 +766,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
        pin_idx = pin_nid_to_pin_index(spec, pin_nid);
        if (pin_idx < 0)
                return;
-       eld = &spec->pins[pin_idx].sink_eld;
-
-       hdmi_present_sense(codec, pin_nid, eld);
 
-       /*
-        * HDMI sink's ELD info cannot always be retrieved for now, e.g.
-        * in console or for audio devices. Assume the highest speakers
-        * configuration, to _not_ prohibit multi-channel audio playback.
-        */
-       if (!eld->spk_alloc)
-               eld->spk_alloc = 0xffff;
+       hdmi_present_sense(&spec->pins[pin_idx], 1);
 }
 
 static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -968,9 +961,11 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
        return 0;
 }
 
-static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
-                              struct hdmi_eld *eld)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
+       struct hda_codec *codec = per_pin->codec;
+       struct hdmi_eld *eld = &per_pin->sink_eld;
+       hda_nid_t pin_nid = per_pin->pin_nid;
        /*
         * Always execute a GetPinSense verb here, even when called from
         * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
@@ -980,26 +975,42 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
         * the unsolicited response to avoid custom WARs.
         */
        int present = snd_hda_pin_sense(codec, pin_nid);
+       bool eld_valid = false;
 
-       memset(eld, 0, sizeof(*eld));
+       memset(eld, 0, offsetof(struct hdmi_eld, eld_buffer));
 
        eld->monitor_present    = !!(present & AC_PINSENSE_PRESENCE);
        if (eld->monitor_present)
-               eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
-       else
-               eld->eld_valid  = 0;
+               eld_valid       = !!(present & AC_PINSENSE_ELDV);
 
        printk(KERN_INFO
                "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-               codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
+               codec->addr, pin_nid, eld->monitor_present, eld_valid);
 
-       if (eld->eld_valid)
+       if (eld_valid) {
                if (!snd_hdmi_get_eld(eld, codec, pin_nid))
                        snd_hdmi_show_eld(eld);
+               else if (repoll) {
+                       queue_delayed_work(codec->bus->workq,
+                                          &per_pin->work,
+                                          msecs_to_jiffies(300));
+               }
+       }
 
        snd_hda_input_jack_report(codec, pin_nid);
 }
 
+static void hdmi_repoll_eld(struct work_struct *work)
+{
+       struct hdmi_spec_per_pin *per_pin =
+       container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+
+       if (per_pin->repoll_count++ > 6)
+               per_pin->repoll_count = 0;
+
+       hdmi_present_sense(per_pin, per_pin->repoll_count);
+}
+
 static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
 {
        struct hdmi_spec *spec = codec->spec;
@@ -1228,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
        if (err < 0)
                return err;
 
-       hdmi_present_sense(codec, per_pin->pin_nid, &per_pin->sink_eld);
+       hdmi_present_sense(per_pin, 0);
        return 0;
 }
 
@@ -1279,6 +1290,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
                                    AC_VERB_SET_UNSOLICITED_ENABLE,
                                    AC_USRSP_EN | pin_nid);
 
+               per_pin->codec = codec;
+               INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
                snd_hda_eld_proc_new(codec, eld, pin_idx);
        }
        return 0;
@@ -1293,10 +1306,12 @@ static void generic_hdmi_free(struct hda_codec *codec)
                struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
                struct hdmi_eld *eld = &per_pin->sink_eld;
 
+               cancel_delayed_work(&per_pin->work);
                snd_hda_eld_proc_free(codec, eld);
        }
        snd_hda_input_jack_free(codec);
 
+       flush_workqueue(codec->bus->workq);
        kfree(spec);
 }
 
index 308bb575bc06464978ee7f6c44dd2ecb1f588434..1d07e8fa243360d25236a4942ab5e363d69d1558 100644 (file)
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
        return false;
 }
 
+static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
+{
+       return spec->capsrc_nids ?
+               spec->capsrc_nids[idx] : spec->adc_nids[idx];
+}
+
 /* select the given imux item; either unmute exclusively or select the route */
 static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
                          unsigned int idx, bool force)
@@ -291,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
@@ -303,8 +311,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
                adc_idx = spec->dyn_adc_idx[idx];
        }
 
-       nid = spec->capsrc_nids ?
-               spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+       nid = get_capsrc(spec, adc_idx);
 
        /* no selection? */
        num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1061,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
        spec->imux_pins[2] = spec->dock_mic_pin;
        for (i = 0; i < 3; i++) {
                strcpy(imux->items[i].label, texts[i]);
-               if (spec->imux_pins[i])
+               if (spec->imux_pins[i]) {
+                       hda_nid_t pin = spec->imux_pins[i];
+                       int c;
+                       for (c = 0; c < spec->num_adc_nids; c++) {
+                               hda_nid_t cap = get_capsrc(spec, c);
+                               int idx = get_connection_index(codec, cap, pin);
+                               if (idx >= 0) {
+                                       imux->items[i].index = idx;
+                                       break;
+                               }
+                       }
                        imux->num_items = i + 1;
+               }
        }
        spec->num_mux_defs = 1;
        spec->input_mux = imux;
@@ -1452,7 +1470,7 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
                switch (fix->type) {
                case ALC_FIXUP_SKU:
                        if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku)
-                               break;;
+                               break;
                        snd_printdd(KERN_INFO "hda_codec: %s: "
                                    "Apply sku override for %s\n",
                                    codec->chip_name, modelname);
@@ -1957,10 +1975,8 @@ static int alc_build_controls(struct hda_codec *codec)
                if (!kctl)
                        kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
                for (i = 0; kctl && i < kctl->count; i++) {
-                       const hda_nid_t *nids = spec->capsrc_nids;
-                       if (!nids)
-                               nids = spec->adc_nids;
-                       err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+                       err = snd_hda_add_nid(codec, kctl, i,
+                                             get_capsrc(spec, i));
                        if (err < 0)
                                return err;
                }
@@ -2615,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
        case AUTO_PIN_SPEAKER_OUT:
                if (cfg->line_outs == 1)
                        return "Speaker";
+               if (cfg->line_outs == 2)
+                       return ch ? "Bass Speaker" : "Speaker";
                break;
        case AUTO_PIN_HP_OUT:
                /* for multi-io case, only the primary out */
@@ -2747,8 +2765,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
                }
 
                for (c = 0; c < num_adcs; c++) {
-                       hda_nid_t cap = spec->capsrc_nids ?
-                               spec->capsrc_nids[c] : spec->adc_nids[c];
+                       hda_nid_t cap = get_capsrc(spec, c);
                        idx = get_connection_index(codec, cap, pin);
                        if (idx >= 0) {
                                spec->imux_pins[imux->num_items] = pin;
@@ -2889,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
                if (!nid)
                        continue;
                if (found_in_nid_list(nid, spec->multiout.dac_nids,
-                                     spec->multiout.num_dacs))
+                                     ARRAY_SIZE(spec->private_dac_nids)))
                        continue;
                if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
                                      ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2910,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
        return 0;
 }
 
+/* return 0 if no possible DAC is found, 1 if one or more found */
 static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                                    const hda_nid_t *pins, hda_nid_t *dacs)
 {
@@ -2927,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                if (!dacs[i])
                        dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
        }
-       return 0;
+       return 1;
 }
 
 static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2937,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
 static int alc_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       const struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
        bool redone = false;
        int i;
 
@@ -2948,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        spec->multiout.extra_out_nid[0] = 0;
        memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
        spec->multiout.dac_nids = spec->private_dac_nids;
+       spec->multi_ios = 0;
 
        /* fill hard-wired DACs first */
        if (!redone) {
@@ -2981,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        for (i = 0; i < cfg->line_outs; i++) {
                if (spec->private_dac_nids[i])
                        spec->multiout.num_dacs++;
-               else
+               else {
                        memmove(spec->private_dac_nids + i,
                                spec->private_dac_nids + i + 1,
                                sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+                       spec->private_dac_nids[cfg->line_outs - 1] = 0;
+               }
        }
 
        if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3006,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
                                 spec->multiout.hp_out_nid);
-       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
-               alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
-                                spec->multiout.extra_out_nid);
+       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+               int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+                                       cfg->speaker_pins,
+                                       spec->multiout.extra_out_nid);
+               /* if no speaker volume is assigned, try again as the primary
+                * output
+                */
+               if (!err && cfg->speaker_outs > 0 &&
+                   cfg->line_out_type == AUTO_PIN_HP_OUT) {
+                       cfg->hp_outs = cfg->line_outs;
+                       memcpy(cfg->hp_pins, cfg->line_out_pins,
+                              sizeof(cfg->hp_pins));
+                       cfg->line_outs = cfg->speaker_outs;
+                       memcpy(cfg->line_out_pins, cfg->speaker_pins,
+                              sizeof(cfg->speaker_pins));
+                       cfg->speaker_outs = 0;
+                       memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+                       cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+                       redone = false;
+                       goto again;
+               }
+       }
 
        return 0;
 }
@@ -3158,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 }
 
 static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
-                                    hda_nid_t dac, const char *pfx)
+                                    hda_nid_t dac, const char *pfx,
+                                    int cidx)
 {
        struct alc_spec *spec = codec->spec;
        hda_nid_t sw, vol;
@@ -3174,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                if (is_ctl_used(spec->sw_ctls, val))
                        return 0; /* already created */
                mark_ctl_usage(spec->sw_ctls, val);
-               return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+               return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
        }
 
        sw = alc_look_for_out_mute_nid(codec, pin, dac);
        vol = alc_look_for_out_vol_nid(codec, pin, dac);
-       err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+       err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
        if (err < 0)
                return err;
-       err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+       err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
        if (err < 0)
                return err;
        return 0;
@@ -3223,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
                hda_nid_t dac = *dacs;
                if (!dac)
                        dac = spec->multiout.dac_nids[0];
-               return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+               return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
        }
 
        if (dacs[num_pins - 1]) {
                /* OK, we have a multi-output system with individual volumes */
                for (i = 0; i < num_pins; i++) {
-                       snprintf(name, sizeof(name), "%s %s",
-                                pfx, channel_name[i]);
-                       err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
-                                                       name);
+                       if (num_pins >= 3) {
+                               snprintf(name, sizeof(name), "%s %s",
+                                        pfx, channel_name[i]);
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               name, 0);
+                       } else {
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               pfx, i);
+                       }
                        if (err < 0)
                                return err;
                }
@@ -3694,8 +3740,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
        if (!pin)
                return 0;
        for (i = 0; i < spec->num_adc_nids; i++) {
-               hda_nid_t cap = spec->capsrc_nids ?
-                       spec->capsrc_nids[i] : spec->adc_nids[i];
+               hda_nid_t cap = get_capsrc(spec, i);
                int idx;
 
                idx = get_connection_index(codec, cap, pin);
index edc2b7bc177c6bca0c6999c66d732b84230d054a..616678fde486d7877ca35bd0a0f72b49647eb833 100644 (file)
@@ -215,6 +215,7 @@ struct sigmatel_spec {
        unsigned int gpio_mute;
        unsigned int gpio_led;
        unsigned int gpio_led_polarity;
+       unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
        unsigned int vref_led;
 
        /* stream */
@@ -227,7 +228,6 @@ struct sigmatel_spec {
 
        /* power management */
        unsigned int num_pwrs;
-       const unsigned int *pwr_mapping;
        const hda_nid_t *pwr_nids;
        const hda_nid_t *dac_list;
 
@@ -374,18 +374,15 @@ static const unsigned long stac92hd73xx_capvols[] = {
 
 #define STAC92HD83_DAC_COUNT 3
 
-static const hda_nid_t stac92hd83xxx_pwr_nids[4] = {
-       0xa, 0xb, 0xd, 0xe,
+static const hda_nid_t stac92hd83xxx_pwr_nids[7] = {
+       0x0a, 0x0b, 0x0c, 0xd, 0x0e,
+       0x0f, 0x10
 };
 
 static const hda_nid_t stac92hd83xxx_slave_dig_outs[2] = {
        0x1e, 0,
 };
 
-static const unsigned int stac92hd83xxx_pwr_mapping[4] = {
-       0x03, 0x0c, 0x20, 0x40,
-};
-
 static const hda_nid_t stac92hd83xxx_dmic_nids[] = {
                0x11, 0x20,
 };
@@ -1645,6 +1642,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
                      "Alienware M17x", STAC_ALIENWARE_M17X),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
                      "Alienware M17x", STAC_ALIENWARE_M17X),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+                     "Alienware M17x", STAC_ALIENWARE_M17X),
        {} /* terminator */
 };
 
@@ -4320,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
                spec->eapd_switch = val;
        get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
        if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
-               if (spec->gpio_led <= 8) {
-                       spec->gpio_mask |= spec->gpio_led;
-                       spec->gpio_dir |= spec->gpio_led;
-                       if (spec->gpio_led_polarity)
-                               spec->gpio_data |= spec->gpio_led;
-               }
+               spec->gpio_mask |= spec->gpio_led;
+               spec->gpio_dir |= spec->gpio_led;
+               if (spec->gpio_led_polarity)
+                       spec->gpio_data |= spec->gpio_led;
        }
 }
 
@@ -4443,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
                int pinctl, def_conf;
 
                /* power on when no jack detection is available */
-               if (!spec->hp_detect) {
+               /* or when the VREF is used for controlling LED */
+               if (!spec->hp_detect ||
+                   spec->vref_mute_led_nid == nid) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
@@ -4470,8 +4469,12 @@ static int stac92xx_init(struct hda_codec *codec)
                                stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
-               if (enable_pin_detect(codec, nid, STAC_PWR_EVENT))
+               if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
                        stac_issue_unsol_event(codec, nid);
+                       continue;
+               }
+               /* none of the above, turn the port OFF */
+               stac_toggle_power_map(codec, nid, 0);
        }
 
        /* sync mute LED */
@@ -4727,11 +4730,7 @@ static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
        if (idx >= spec->num_pwrs)
                return;
 
-       /* several codecs have two power down bits */
-       if (spec->pwr_mapping)
-               idx = spec->pwr_mapping[idx];
-       else
-               idx = 1 << idx;
+       idx = 1 << idx;
 
        val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff;
        if (enable)
@@ -4915,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                        if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
                                  &spec->gpio_led_polarity,
                                  &spec->gpio_led) == 2) {
-                               if (spec->gpio_led < 4)
+                               unsigned int max_gpio;
+                               max_gpio = snd_hda_param_read(codec, codec->afg,
+                                                             AC_PAR_GPIO_CAP);
+                               max_gpio &= AC_GPIO_IO_COUNT;
+                               if (spec->gpio_led < max_gpio)
                                        spec->gpio_led = 1 << spec->gpio_led;
+                               else
+                                       spec->vref_mute_led_nid = spec->gpio_led;
                                return 1;
                        }
                        if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4924,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                                set_hp_led_gpio(codec);
                                return 1;
                        }
+                       /* BIOS bug: unfilled OEM string */
+                       if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+                               set_hp_led_gpio(codec);
+                               spec->gpio_led_polarity = 1;
+                               return 1;
+                       }
                }
 
                /*
@@ -5045,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
        struct sigmatel_spec *spec = codec->spec;
 
        /* sync mute LED */
-       if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
-                       stac_gpio_set(codec, spec->gpio_mask,
-                                       spec->gpio_dir, spec->gpio_data);
-               } else {
-                       stac_vrefout_set(codec,
-                                       spec->gpio_led, spec->vref_led);
-               }
-       }
-       return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
-       struct sigmatel_spec *spec = codec->spec;
-       if (spec->gpio_led > 8) {
-               /* with vref-out pin used for mute led control
-                * codec AFG is prevented from D3 state, but on
-                * system suspend it can (and should) be used
-                */
-               snd_hda_codec_read(codec, codec->afg, 0,
-                               AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       }
+       if (spec->vref_mute_led_nid)
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
+       else if (spec->gpio_led)
+               stac_gpio_set(codec, spec->gpio_mask,
+                             spec->gpio_dir, spec->gpio_data);
        return 0;
 }
 
@@ -5078,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        struct sigmatel_spec *spec = codec->spec;
 
        if (power_state == AC_PWRST_D3) {
-               if (spec->gpio_led > 8) {
+               if (spec->vref_mute_led_nid) {
                        /* with vref-out pin used for mute led control
                         * codec AFG is prevented from D3 state
                         */
@@ -5131,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                }
        }
        /*polarity defines *not* muted state level*/
-       if (spec->gpio_led <= 8) {
+       if (!spec->vref_mute_led_nid) {
                if (muted)
                        spec->gpio_data &= ~spec->gpio_led; /* orange */
                else
@@ -5149,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                muted_lvl = spec->gpio_led_polarity ?
                                AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
                spec->vref_led = muted ? muted_lvl : notmtd_lvl;
-               stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
        }
        return 0;
 }
@@ -5629,9 +5624,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
                snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
        }
 
-       /* reset pin power-down; Windows may leave these bits after reboot */
-       snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7EC, 0);
-       snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
        codec->no_trigger_sense = 1;
        codec->spec = spec;
 
@@ -5641,7 +5633,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
        codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
        spec->digbeep_nid = 0x21;
        spec->pwr_nids = stac92hd83xxx_pwr_nids;
-       spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
        spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
        spec->multiout.dac_nids = spec->dac_nids;
        spec->init = stac92hd83xxx_core_init;
@@ -5658,9 +5649,6 @@ again:
                stac92xx_set_config_regs(codec,
                                stac92hd83xxx_brd_tbl[spec->board_config]);
 
-       if (spec->board_config != STAC_92HD83XXX_PWR_REF)
-               spec->num_pwrs = 0;
-
        codec->patch_ops = stac92xx_patch_ops;
 
        if (find_mute_led_gpio(codec, 0))
@@ -5670,15 +5658,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
@@ -5869,8 +5855,6 @@ again:
                    (codec->revision_id & 0xf) == 1)
                        spec->stream_delay = 40; /* 40 milliseconds */
 
-               /* no output amps */
-               spec->num_pwrs = 0;
                /* disable VSW */
                spec->init = stac92hd71bxx_core_init;
                unmute_init++;
@@ -5885,8 +5869,6 @@ again:
                if ((codec->revision_id & 0xf) == 1)
                        spec->stream_delay = 40; /* 40 milliseconds */
 
-               /* no output amps */
-               spec->num_pwrs = 0;
                /* fallthru */
        default:
                spec->init = stac92hd71bxx_core_init;
@@ -5989,15 +5971,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
index 431c0d417eeb61500c3332c687568a7851307201..b5137629f8e942a75a941e0e183cef98fb58db02 100644 (file)
@@ -208,6 +208,7 @@ struct via_spec {
        /* work to check hp jack state */
        struct hda_codec *codec;
        struct delayed_work vt1708_hp_work;
+       int hp_work_active;
        int vt1708_jack_detect;
        int vt1708_hp_present;
 
@@ -305,27 +306,35 @@ enum {
 static void analog_low_current_mode(struct hda_codec *codec);
 static bool is_aa_path_mute(struct hda_codec *codec);
 
-static void vt1708_start_hp_work(struct via_spec *spec)
+#define hp_detect_with_aa(codec) \
+       (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
+        !is_aa_path_mute(codec))
+
+static void vt1708_stop_hp_work(struct via_spec *spec)
 {
        if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
                return;
-       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
-                           !spec->vt1708_jack_detect);
-       if (!delayed_work_pending(&spec->vt1708_hp_work))
-               schedule_delayed_work(&spec->vt1708_hp_work,
-                                     msecs_to_jiffies(100));
+       if (spec->hp_work_active) {
+               snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
+               cancel_delayed_work_sync(&spec->vt1708_hp_work);
+               spec->hp_work_active = 0;
+       }
 }
 
-static void vt1708_stop_hp_work(struct via_spec *spec)
+static void vt1708_update_hp_work(struct via_spec *spec)
 {
        if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
                return;
-       if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
-           && !is_aa_path_mute(spec->codec))
-               return;
-       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
-                           !spec->vt1708_jack_detect);
-       cancel_delayed_work_sync(&spec->vt1708_hp_work);
+       if (spec->vt1708_jack_detect &&
+           (spec->active_streams || hp_detect_with_aa(spec->codec))) {
+               if (!spec->hp_work_active) {
+                       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
+                       schedule_delayed_work(&spec->vt1708_hp_work,
+                                             msecs_to_jiffies(100));
+                       spec->hp_work_active = 1;
+               }
+       } else if (!hp_detect_with_aa(spec->codec))
+               vt1708_stop_hp_work(spec);
 }
 
 static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
 
        set_widgets_power_state(codec);
        analog_low_current_mode(snd_kcontrol_chip(kcontrol));
-       if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
-               if (is_aa_path_mute(codec))
-                       vt1708_start_hp_work(codec->spec);
-               else
-                       vt1708_stop_hp_work(codec->spec);
-       }
+       vt1708_update_hp_work(codec->spec);
        return change;
 }
 
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
        spec->cur_dac_stream_tag = stream_tag;
        spec->cur_dac_format = format;
        mutex_unlock(&spec->config_mutex);
-       vt1708_start_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
        spec->cur_hp_stream_tag = stream_tag;
        spec->cur_hp_format = format;
        mutex_unlock(&spec->config_mutex);
-       vt1708_start_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
        snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
        spec->active_streams &= ~STREAM_MULTI_OUT;
        mutex_unlock(&spec->config_mutex);
-       vt1708_stop_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
                snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
        spec->active_streams &= ~STREAM_INDEP_HP;
        mutex_unlock(&spec->config_mutex);
-       vt1708_stop_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
        int nums;
        struct via_spec *spec = codec->spec;
 
-       if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
+       if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
+           (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
                present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
 
        if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
 
        if (spec->codec_type != VT1708)
                return 0;
-       spec->vt1708_jack_detect =
-               !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
        ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
        return 0;
 }
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct via_spec *spec = codec->spec;
-       int change;
+       int val;
 
        if (spec->codec_type != VT1708)
                return 0;
-       spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
-       change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
-               == !spec->vt1708_jack_detect;
-       if (spec->vt1708_jack_detect) {
+       val = !!ucontrol->value.integer.value[0];
+       if (spec->vt1708_jack_detect == val)
+               return 0;
+       spec->vt1708_jack_detect = val;
+       if (spec->vt1708_jack_detect &&
+           snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
                mute_aa_path(codec, 1);
                notify_aa_path_ctls(codec);
        }
-       return change;
+       via_hp_automute(codec);
+       vt1708_update_hp_work(spec);
+       return 1;
 }
 
 static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
        via_auto_init_unsol_event(codec);
 
        via_hp_automute(codec);
+       vt1708_update_hp_work(spec);
 
        return 0;
 }
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
                spec->vt1708_hp_present ^= 1;
                via_hp_automute(spec->codec);
        }
-       vt1708_start_hp_work(spec);
+       if (spec->vt1708_jack_detect)
+               schedule_delayed_work(&spec->vt1708_hp_work,
+                                     msecs_to_jiffies(100));
 }
 
 static int get_mux_nids(struct hda_codec *codec)
index 5c8717e29eebdbc4983933dcccaee36492457e41..8c3e7fcefd99c91c38d39f92fd0cae8323cee210 100644 (file)
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
        return ioread32(address);
 }
 
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
+static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
+                              u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_fromio(data, address, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_fromio */
+       for (i = 0; i != len; ++i)
+               data[i] = ioread32(address + i);
 }
 
 
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
        iowrite32(data, address);
 }
 
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
-                        u32 len)
+static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
+                               const u32 *data, u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_toio(address, data, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_to */
+       for (i = 0; i != len; ++i)
+               iowrite32(data[i], address + i);
 }
 
 
index 1dd562980b6c3595c233beffe49012c6352c605c..4d7ff797a6468abf5b5499cdfd92dab430d9948f 100644 (file)
@@ -72,10 +72,7 @@ enum {
 };
 
 unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
 void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
-                        u32 len);
 
 /* plx register access */
 enum {
index e760adad9523ebf82db4f07dbc259c73d8d44326..19ee2203cbb50fe7250bca2bad79c14e15a6069b 100644 (file)
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
                        hdspm->io_type = AES32;
                        hdspm->card_name = "RME AES32";
                        hdspm->midiPorts = 2;
-               } else if ((hdspm->firmware_rev == 0xd5) ||
+               } else if ((hdspm->firmware_rev == 0xd2) ||
                        ((hdspm->firmware_rev >= 0xc8)  &&
                                (hdspm->firmware_rev <= 0xcf))) {
                        hdspm->io_type = MADI;
index a391e622a19209f535eb0441e85348cda4d085aa..28dfafb56dd1d70a9d95e4ef53f6e7ba22fb4faf 100644 (file)
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
index bee3c94f58b0736c57f361141e0ed32e58640317..d1fcc816ce9705c5aca82f68eb327d65618301cd 100644 (file)
@@ -1,6 +1,6 @@
 config SND_ATMEL_SOC
        tristate "SoC Audio for the Atmel System-on-Chip"
-       depends on ARCH_AT91 || AVR32
+       depends on ARCH_AT91
        help
          Say Y or M if you want to add support for codecs attached to
          the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
          Say Y if you want to add support for SoC audio on WM8731-based
          AT91sam9g20 evaluation board.
 
-config SND_AT32_SOC_PLAYPAQ
-        tristate "SoC Audio support for PlayPaq with WM8510"
-        depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
-        select SND_ATMEL_SOC_SSC
-        select SND_SOC_WM8510
-        help
-          Say Y or M here if you want to add support for SoC audio
-          on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
-        bool "Run CODEC on PlayPaq in slave mode"
-        depends on SND_AT32_SOC_PLAYPAQ
-        default n
-        help
-          Say Y if you want to run with the AT32 SSC generating the BCLK
-          and FRAME signals on the PlayPaq.  Unless you want to play
-          with the AT32 as the SSC master, you probably want to say N here,
-          as this will give you better sound quality.
-
 config SND_AT91_SOC_AFEB9260
        tristate "SoC Audio support for AFEB9260 board"
        depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
index e7ea56bd5f82a94de94d4169c464e809d34dfb5e..a5c0bf19da78f01e823fc614c61528c30272a67c 100644 (file)
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
 # AT91 Machine Support
 snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
 
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
 obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644 (file)
index 73ae99a..0000000
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- *    Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code.  Something like:
- *     at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN               GPIO_PIN_PA(30)
-#define MCLK_PERIPH            GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
-       /* CMR div */
-       unsigned int cmr_div;
-
-       /* Frame period (as needed by xCMR.PERIOD) */
-       unsigned int period;
-
-       /* The SSC clock rate these settings where calculated for */
-       unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
-       struct snd_pcm_hw_params *params,
-       struct snd_soc_dai *cpu_dai)
-{
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       struct ssc_clock_data cd;
-       unsigned int rate, width_bits, channels;
-       unsigned int bitrate, ssc_div;
-       unsigned actual_rate;
-
-
-       /*
-        * Figure out required bitrate
-        */
-       rate = params_rate(params);
-       channels = params_channels(params);
-       width_bits = snd_pcm_format_physical_width(params_format(params));
-       bitrate = rate * width_bits * channels;
-
-
-       /*
-        * Figure out required SSC divider and period for required bitrate
-        */
-       cd.ssc_rate = clk_get_rate(ssc->clk);
-       ssc_div = cd.ssc_rate / bitrate;
-       cd.cmr_div = ssc_div / 2;
-       if (ssc_div & 1) {
-               /* round cmr_div up */
-               cd.cmr_div++;
-       }
-       cd.period = width_bits - 1;
-
-
-       /*
-        * Find actual rate, compare to requested rate
-        */
-       actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
-       pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
-                rate, actual_rate);
-
-
-       return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
-       int ret;
-
-
-       /* Due to difficulties with getting the correct clocks from the AT32's
-        * PLL0, we're going to let the CODEC be in charge of all the clocks
-        */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-#else
-       struct ssc_clock_data cd;
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
-       if (ssc == NULL) {
-               pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
-               return -EINVAL;
-       }
-
-
-       /*
-        * Figure out PLL and BCLK dividers for WM8510
-        */
-       switch (params_rate(params)) {
-       case 48000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 44100:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 22050:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_4;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 16000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_6;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 11025:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_8;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 8000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_12;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       default:
-               pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
-                          params_rate(params));
-               return -EINVAL;
-       }
-
-
-       /*
-        * set CPU and CODEC DAI configuration
-        */
-       ret = snd_soc_dai_set_fmt(codec_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CODEC DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       /*
-        * Set CPU clock configuration
-        */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
-       pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
-                cd.cmr_div, cd.period);
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
-                                         cd.period);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU transmit period (%d)\n",
-                          ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       /*
-        * Set CODEC clock configuration
-        */
-       pr_debug("playpaq_wm8510: "
-                "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
-                clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
-       if (ret < 0) {
-               pr_warning
-                   ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
-                    ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
-                                        clk_get_rate(CODEC_CLK), pll_out);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
-       .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
-       SND_SOC_DAPM_MIC("Int Mic", NULL),
-       SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
-       /* speaker connected to SPKOUT */
-       {"Ext Spk", NULL, "SPKOUTP"},
-       {"Ext Spk", NULL, "SPKOUTN"},
-
-       {"Mic Bias", NULL, "Int Mic"},
-       {"MICN", NULL, "Mic Bias"},
-       {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
-       struct snd_soc_codec *codec = rtd->codec;
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int i;
-
-       /*
-        * Add DAPM widgets
-        */
-       for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-               snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
-       /*
-        * Setup audio path interconnects
-        */
-       snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
-       /* always connected pins */
-       snd_soc_dapm_enable_pin(dapm, "Int Mic");
-       snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
-       /* Make CSB show PLL rate */
-       snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
-                                      WM8510_OPCLKDIV_1 | 4);
-
-       return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
-       .name = "WM8510",
-       .stream_name = "WM8510 PCM",
-       .cpu_dai_name= "atmel-ssc-dai.0",
-       .platform_name = "atmel-pcm-audio",
-       .codec_name = "wm8510-codec.0-0x1a",
-       .codec_dai_name = "wm8510-hifi",
-       .init = playpaq_wm8510_init,
-       .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
-       .name = "LRS_PlayPaq_WM8510",
-       .dai_link = &playpaq_wm8510_dai,
-       .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
-       int ret = 0;
-
-       /*
-        * Configure MCLK for WM8510
-        */
-       _gclk0 = clk_get(NULL, "gclk0");
-       if (IS_ERR(_gclk0)) {
-               _gclk0 = NULL;
-               ret = PTR_ERR(_gclk0);
-               goto err_gclk0;
-       }
-       _pll0 = clk_get(NULL, "pll0");
-       if (IS_ERR(_pll0)) {
-               _pll0 = NULL;
-               ret = PTR_ERR(_pll0);
-               goto err_pll0;
-       }
-       ret = clk_set_parent(_gclk0, _pll0);
-       if (ret) {
-               pr_warning("snd-soc-playpaq: "
-                          "Failed to set PLL0 as parent for DAC clock\n");
-               goto err_set_clk;
-       }
-       clk_set_rate(CODEC_CLK, 12000000);
-       clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
-       /*
-        * Create and register platform device
-        */
-       playpaq_snd_device = platform_device_alloc("soc-audio", 0);
-       if (playpaq_snd_device == NULL) {
-               ret = -ENOMEM;
-               goto err_device_alloc;
-       }
-
-       platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
-       ret = platform_device_add(playpaq_snd_device);
-       if (ret) {
-               pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
-                          ret);
-               goto err_device_add;
-       }
-
-       return 0;
-
-
-err_device_add:
-       if (playpaq_snd_device != NULL) {
-               platform_device_put(playpaq_snd_device);
-               playpaq_snd_device = NULL;
-       }
-err_device_alloc:
-err_set_clk:
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-err_pll0:
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_free_pin(MCLK_PIN);
-#endif
-
-       platform_device_unregister(playpaq_snd_device);
-       playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
index 4584514d93d4fd21a92de37847d318951e824bf8..fa787d45d74a920a32c4e8327a69c4a8a3544461 100644 (file)
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_CX20442
        select SND_SOC_DA7210 if I2C
        select SND_SOC_DFBMCS320
-       select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+       select SND_SOC_JZ4740_CODEC
        select SND_SOC_LM4857 if I2C
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX98095 if I2C
index 444747f0db26615992e360b6a162eaf7b9f9ac44..dd7be0dbbc58189ff153b1a7b5724967bd4caadd 100644 (file)
@@ -34,7 +34,7 @@
 
 #define AD1836_ADC_CTRL2               13
 #define AD1836_ADC_WORD_LEN_MASK       0x30
-#define AD1836_ADC_WORD_OFFSET         5
+#define AD1836_ADC_WORD_OFFSET         4
 #define AD1836_ADC_SERFMT_MASK         (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256       (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128       (0x5 << 6)
index 1ccf8dd47576ce4c746ffb54461c19f196acff43..45c63028b40d1636b56f6aa3e5f13e0d110b9a5d 100644 (file)
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
 };
 
 static const unsigned int adau1373_bass_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(3),
        0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
        3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
        5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
index f1f237ecec2a6c43dcdf9ee31b72da7f0cdecff6..73f46eb459f15fa43c5aadc89c2d5a61346fb351 100644 (file)
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 static int cs4270_soc_resume(struct snd_soc_codec *codec)
 {
        struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-       struct i2c_client *i2c_client = to_i2c_client(codec->dev);
        int reg;
 
        regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
        ndelay(500);
 
        /* first restore the entire register cache ... */
-       for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
-               u8 val = snd_soc_read(codec, reg);
-
-               if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
-                       dev_err(codec->dev, "i2c write failed\n");
-                       return -EIO;
-               }
-       }
+       snd_soc_cache_sync(codec);
 
        /* ... then disable the power-down bits */
        reg = snd_soc_read(codec, CS4270_PWRCTL);
index 23d1bd5dadda36185e2c56702e7fd17a739f9a1a..69fde1506fe1fde2fe312ff80176723171ccef07 100644 (file)
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 {
        int ret;
        /* Set power-down bit */
-       ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+       ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
+                                 CS4271_MODE2_PDN);
        if (ret < 0)
                return ret;
        return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
                return ret;
        }
 
-       ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
-               CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+       ret = snd_soc_update_bits(codec, CS4271_MODE2,
+                                 CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
+                                 CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
        if (ret < 0)
                return ret;
        ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
index 8c3c8205d19e99016e47b1564aa58cbdf91b0bab..1ee66361f61b946e5738798daf03d61baf2f8ecb 100644 (file)
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
 
 static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
        .probe =        cs42l51_probe,
-       .reg_cache_size = CS42L51_NUMREGS,
+       .reg_cache_size = CS42L51_NUMREGS + 1,
        .reg_word_size = sizeof(u8),
 };
 
index e373f8f0690731874d0a153190951ddc564500fe..3e1f4e172bfb90c318e4150d8aebbe782b78d4ab 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 
index 9e7e964a5fa3fd1a8824c17c9dad4bbc60923909..dcf6f2a1600ae5726a60d0a7e11ef6e53d57d466 100644 (file)
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
        unsigned int mask = mc->max;
        unsigned int val = (ucontrol->value.integer.value[0] & mask);
        unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
-       unsigned int change = 1;
+       unsigned int change = 0;
 
-       if (((max9877_regs[reg] >> shift) & mask) == val)
-               change = 0;
+       if (((max9877_regs[reg] >> shift) & mask) != val)
+               change = 1;
 
-       if (((max9877_regs[reg2] >> shift) & mask) == val2)
-               change = 0;
+       if (((max9877_regs[reg2] >> shift) & mask) != val2)
+               change = 1;
 
        if (change) {
                max9877_regs[reg] &= ~(mask << shift);
index 27a078cbb6eb2542cb6bc0fa0be372b5a77cabac..4646e808b90a334e0d59935a1e93244672d276be 100644 (file)
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
 static unsigned int mic_bst_tlv[] = {
-       TLV_DB_RANGE_HEAD(6),
+       TLV_DB_RANGE_HEAD(7),
        0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
        1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
        2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
index d15695d1c27397a7b3f2966c1806584e5e9a39bc..bbcf921166f7470fad24577d2aa52d1cf85258f4 100644 (file)
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
 
 /* tlv for mic gain, 0db 20db 30db 40db */
 static const unsigned int mic_gain_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
        1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
 };
index bb82408ab8e1bb93f187dac610e5a1141501bd72..d2f37152f940cebc67f93a5bff140e64a67a07e7 100644 (file)
@@ -76,6 +76,8 @@ struct sta32x_priv {
 
        unsigned int mclk;
        unsigned int format;
+
+       u32 coef_shadow[STA32X_COEF_COUNT];
 };
 
 static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
        int numcoef = kcontrol->private_value >> 16;
        int index = kcontrol->private_value & 0xffff;
        unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
        snd_soc_write(codec, STA32X_CFUD, cfud);
 
        snd_soc_write(codec, STA32X_CFADDR2, index);
+       for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
+               sta32x->coef_shadow[index + i] =
+                         (ucontrol->value.bytes.data[3 * i] << 16)
+                       | (ucontrol->value.bytes.data[3 * i + 1] << 8)
+                       | (ucontrol->value.bytes.data[3 * i + 2]);
        for (i = 0; i < 3 * numcoef; i++)
                snd_soc_write(codec, STA32X_B1CF1 + i,
                              ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+{
+       struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+       unsigned int cfud;
+       int i;
+
+       /* preserve reserved bits in STA32X_CFUD */
+       cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+
+       for (i = 0; i < STA32X_COEF_COUNT; i++) {
+               snd_soc_write(codec, STA32X_CFADDR2, i);
+               snd_soc_write(codec, STA32X_B1CF1,
+                             (sta32x->coef_shadow[i] >> 16) & 0xff);
+               snd_soc_write(codec, STA32X_B1CF2,
+                             (sta32x->coef_shadow[i] >> 8) & 0xff);
+               snd_soc_write(codec, STA32X_B1CF3,
+                             (sta32x->coef_shadow[i]) & 0xff);
+               /* chip documentation does not say if the bits are
+                * self-clearing, so do it explicitly */
+               snd_soc_write(codec, STA32X_CFUD, cfud);
+               snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+       }
+       return 0;
+}
+
+int sta32x_cache_sync(struct snd_soc_codec *codec)
+{
+       unsigned int mute;
+       int rc;
+
+       if (!codec->cache_sync)
+               return 0;
+
+       /* mute during register sync */
+       mute = snd_soc_read(codec, STA32X_MMUTE);
+       snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+       sta32x_sync_coef_shadow(codec);
+       rc = snd_soc_cache_sync(codec);
+       snd_soc_write(codec, STA32X_MMUTE, mute);
+       return rc;
+}
+
 #define SINGLE_COEF(xname, index) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
                                return ret;
                        }
 
-                       snd_soc_cache_sync(codec);
+                       sta32x_cache_sync(codec);
                }
 
                /* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
                            STA32X_CxCFG_OM_MASK,
                            2 << STA32X_CxCFG_OM_SHIFT);
 
+       /* initialize coefficient shadow RAM with reset values */
+       for (i = 4; i <= 49; i += 5)
+               sta32x->coef_shadow[i] = 0x400000;
+       for (i = 50; i <= 54; i++)
+               sta32x->coef_shadow[i] = 0x7fffff;
+       sta32x->coef_shadow[55] = 0x5a9df7;
+       sta32x->coef_shadow[56] = 0x7fffff;
+       sta32x->coef_shadow[59] = 0x7fffff;
+       sta32x->coef_shadow[60] = 0x400000;
+       sta32x->coef_shadow[61] = 0x400000;
+
        sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
        /* Bias level configuration will have done an extra enable */
        regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
index b97ee5a75667399e77a5bcd7493432407d554cc2..d8e32a6262ee087ec13e73d24e57b936eefaa42b 100644 (file)
@@ -19,6 +19,7 @@
 /* STA326 register addresses */
 
 #define STA32X_REGISTER_COUNT  0x2d
+#define STA32X_COEF_COUNT 62
 
 #define STA32X_CONFA   0x00
 #define STA32X_CONFB    0x01
index c5ca8cfea60f80f8de27cc5d12ad55f69bd900f2..0441893e270ed2b5621833fecc997b6b20f85159 100644 (file)
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
 
 static int __init uda1380_modinit(void)
 {
-       int ret;
+       int ret = 0;
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        ret = i2c_add_driver(&uda1380_i2c_driver);
        if (ret != 0)
                pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
 #endif
-       return 0;
+       return ret;
 }
 module_init(uda1380_modinit);
 
index 7e5ec03f6f8dd579d1bd43413fd1d007a1989bcb..a7c9ae17fc7eb0e743a8dbfb27db88fea58a456e 100644 (file)
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_write(codec, WM8731_PWR, 0xffff);
                regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
                                       wm8731->supplies);
+               codec->cache_sync = 1;
                break;
        }
        codec->dapm.bias_level = level;
index a9504710bb692e806655e785e5f35121ecf4afc1..3a629d0d690ed1fbe8129f096e0edf492faf9eb2 100644 (file)
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
        struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
        u16 ioctl;
 
+       if (wm8753->dai_func == ucontrol->value.integer.value[0])
+               return 0;
+
        if (codec->active)
                return -EBUSY;
 
index bfdc52370ad02de96bd9cf1859614db91e682949..d3b0a20744f1950ee2c0c882f77d72b6ab0a1959 100644 (file)
@@ -235,6 +235,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
        switch (snd_pcm_format_width(params_format(params))) {
        case 16:
                iface = 0;
+               break;
        case 20:
                iface = 0x10;
                break;
index 0293763debe5811160fb3b7cc23284a91df2a94a..5a14d5c0e0e1b6dc98fb39140a4ca47f6bd18294 100644 (file)
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
        }
 
        if (memcmp(fw->data, "WMFW", 4) != 0) {
+               memcpy(&data32, fw->data, sizeof(data32));
+               data32 = be32_to_cpu(data32);
                dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
                        name, data32);
                goto err;
index 91d3c6dbeba3317758d747a6c6568cae625ea79e..53edd9a8c758f24943de1219b37cb45602fc11ef 100644 (file)
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
 static const unsigned int mixinpga_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(5),
        0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
        2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
        3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
 static const unsigned int classd_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index eec8e143511665a538c6950a4f679ad217aad827..d1a142f48b09f03fb565a5c4a6419842a854c6f6 100644 (file)
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
 static const unsigned int drc_max_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
        3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
 };
index 9c982e47eb99308b377e7143d8024c19863b14c5..d0c545b73d7865c04b9fefd286b85b7fe63fc5b2 100644 (file)
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
        bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
        lrclk = bclk_rate / params_rate(params);
+       if (!lrclk) {
+               dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+                       bclk_rate);
+               return -EINVAL;
+       }
        dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
                lrclk, bclk_rate / lrclk);
 
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
                switch (wm8994->revision) {
                case 0:
                case 1:
+               case 2:
+               case 3:
                        wm8994->hubs.dcs_codes_l = -9;
                        wm8994->hubs.dcs_codes_r = -5;
                        break;
index 645c980d6b80edd81b1f0886c013c34f884346d6..a33b04d1719537409eb186c02a7e5dedfcbc7683 100644 (file)
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
                break;
        case 24576000:
                ratediv = WM8996_SYSCLK_DIV;
+               wm8996->sysclk /= 2;
        case 12288000:
                snd_soc_update_bits(codec, WM8996_AIF_RATE,
                                    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
index 3cd35a02c28c7164f525f00f7375ac5ef4d6bbb1..4a398c3bfe84aea9ef4f6c8540f09b8b2bf34282 100644 (file)
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                        mdelay(100);
 
                        /* Normal bias enable & soft start off */
-                       reg |= WM9081_BIAS_ENA;
                        reg &= ~WM9081_VMID_RAMP;
                        snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
 
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                }
 
                /* VMID 2*240k */
-               reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
+               reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
                reg &= ~WM9081_VMID_SEL_MASK;
                reg |= 0x04;
                snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                break;
 
        case SND_SOC_BIAS_OFF:
-               /* Startup bias source */
+               /* Startup bias source and disable bias */
                reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
                reg |= WM9081_BIAS_SRC;
+               reg &= ~WM9081_BIAS_ENA;
                snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
 
-               /* Disable VMID and biases with soft ramping */
+               /* Disable VMID with soft ramping */
                reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
-               reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
+               reg &= ~WM9081_VMID_SEL_MASK;
                reg |= WM9081_VMID_RAMP;
                snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
 
index 2b5252c9e37774963a55626e284878e7ff777414..f94c06057c64c31ac6e4bd70fbf80799cc52837d 100644 (file)
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
 }
 
 static const unsigned int in_tlv[] = {
-       TLV_DB_RANGE_HEAD(6),
+       TLV_DB_RANGE_HEAD(3),
        0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
        1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
        4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
 };
 static const unsigned int mix_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
        3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
 };
 static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
 static const unsigned int spkboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index 84f33d4ea2cd5ec5461d0f8518c2462c3a58d544..48e61e912400fb2d2cca4ad74d9faf17a3075444 100644 (file)
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
 static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
 static const unsigned int spkboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index 0268cf989736f303a224fbd26b0db2e8bf97f2ed..83c4bd5b2dd76bbf3f401c5a6bf0159d337f29b4 100644 (file)
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
 
        /* Initialize the the device_attribute structure */
        dev_attr = &ssi_private->dev_attr;
+       sysfs_attr_init(&dev_attr->attr);
        dev_attr->attr.name = "statistics";
        dev_attr->attr.mode = S_IRUGO;
        dev_attr->show = fsl_sysfs_ssi_show;
index 31af405bda843cc691e755cc6bb4a0afec78925f..ae49f1c78c6de797bd193946b1d350aa00bd8a40 100644 (file)
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
        }
 
        if (strcasecmp(sprop, "i2s-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
 
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
                }
                machine_data->clk_frequency = be32_to_cpup(iprop);
        } else if (strcasecmp(sprop, "i2s-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "lj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "lj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "rj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "rj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "ac97-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "ac97-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else {
index b133bfcc5848ea8f6ec3c7cab18772ac18bc07b9..738391757f2ccb1a5aa6a8883de0ba127fedea68 100644 (file)
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
 
 config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
-       depends on MACH_IMX27_VISSTRIM_M10
+       depends on MACH_IMX27_VISSTRIM_M10 && I2C
        select SND_SOC_TLV320AIC32X4
        select SND_MXC_SOC_MX2
        help
index 8f49e165f4d1dd40119143b3b971a2b2f964025c..c62d715235e29ac5fa20639271d79958a57fd853 100644 (file)
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
        depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325
+       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_ALC5623
        help
index dea5aa4aa6473a03231ff22e416bb61684c80046..f39d7dd9fbcb5956cf55f989ae0a869bfe7bd336 100644 (file)
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
        platform_driver_unregister(&mxs_pcm_driver);
 }
 module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
index 7fbeaec06eb4d514fdebb8aef5e4e1e568489b4c..1c57f6630a48d8ff1e3a626677114bbe8f024f27 100644 (file)
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
index 9c0edad90d8b4b3591c7c3a23f4c03b9e42594c1..a4e3237956e26dc499602f9b7397cf196c7ee5ec 100644 (file)
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
        if (ret)
                goto out3;
 
-       mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+       /* enbale ac97 multifunction pin */
+       mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
 
        return 0;
 
index ffd2242e305f0827fb742f2c2339041d1859785f..a0f7d3cfa470b0857586b10021290aec5b4fd20f 100644 (file)
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
 config SND_SOC_RAUMFELD
        tristate "SoC Audio support Raumfeld audio adapter"
        depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+       depends on I2C && SPI_MASTER
        select SND_PXA_SOC_SSP
        select SND_SOC_CS4270
        select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
 
 config SND_PXA2XX_SOC_HX4700
        tristate "SoC Audio support for HP iPAQ hx4700"
-       depends on SND_PXA2XX_SOC && MACH_H4700
+       depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_AK4641
        help
index 65c124831a0063f0b645062a13559348e5af367a..c664e33fb6d732c239e00d115df663a1b4b5908f 100644 (file)
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
        snd_soc_card_hx4700.dev = &pdev->dev;
        ret = snd_soc_register_card(&snd_soc_card_hx4700);
        if (ret)
-               return ret;
+               gpio_free_array(hx4700_audio_gpios,
+                               ARRAY_SIZE(hx4700_audio_gpios));
 
-       return 0;
+       return ret;
 }
 
 static int __devexit hx4700_audio_remove(struct platform_device *pdev)
index 1826acf20f7c96cf3fdca744d083c29f2bdfd837..8e523fd9189e562557b36b39671d9e12b7deefa2 100644 (file)
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int err;
 
        /* These endpoints are not being used. */
        snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
        .dai_link       = &jive_dai,
        .num_links      = 1,
 
-       .dapm_widgtets  = wm8750_dapm_widgets,
+       .dapm_widgets   = wm8750_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
        .dapm_routes    = audio_map,
        .num_dapm_routes = ARRAY_SIZE(audio_map),
index 3a0dbfc793f0fc5d6c36b9bc924ec11ff07970ff..8bd1dc5706bf1f423f5d0c7d63a7316c5b9191d0 100644 (file)
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <sound/soc.h>
 
 static struct snd_soc_card smdk2443;
index f75e43997d5beb5b27a82f8d023eddd33311538e..ad9ac42522e2539faf84c6286cc1614573e441f2 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "../codecs/wm8994.h"
 #include <sound/pcm_params.h>
+#include <linux/module.h>
 
  /*
   * Default CFG switch settings to use this driver:
index 85bf541a771d05226b761d6d274e7affd8cba6e2..4b8e35410eb1962623cc31882967397747e5b3e7 100644 (file)
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
        snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
-       snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+       snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
 
index a5d3685a5d38049313391ddb8e174edd9c28a21b..a25fa63ce9a27501a4f2d4a6334911076200532e 100644 (file)
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
        struct snd_soc_card *card = dev_get_drvdata(dev);
        int i, ac97_control = 0;
 
+       /* If the initialization of this soc device failed, there is no codec
+        * associated with it. Just bail out in this case.
+        */
+       if (list_empty(&card->codec_dev_list))
+               return 0;
+
        /* AC97 devices might have other drivers hanging off them so
         * need to resume immediately.  Other drivers don't have that
         * problem and may take a substantial amount of time to resume
index 0c12b98484bdd8316418358b5cead696e8774c57..4220bb0f27301aa962964b9eb645fd0f5e51e17c 100644 (file)
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
index b61945f3af9e594aa28b2d992239bd4f4a714c6b..32d2a21f2e3b5a401948d24b81c945e239e8fa4e 100644 (file)
@@ -1632,6 +1632,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /* Roland GAIA SH-01 */
+       USB_DEVICE(0x0582, 0x0111),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Roland",
+               .product_name = "GAIA",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                               .out_cables = 0x0003,
+                               .in_cables  = 0x0003
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 {
        USB_DEVICE(0x0582, 0x0113),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
index 7d98676808d8722a39623de8983642f987cea7cc..955930e0a5c34cd0852f92aa39a7c73492c159b5 100644 (file)
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 
        list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter, first) < 0) {
-                       if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+                       if (errno == EINVAL || errno == ENOSYS ||
+                           errno == ENOENT || errno == EOPNOTSUPP) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    event_name(counter));
index e42626422587851b9c1b6e755dfdb09858640124..d7915d4e77cb629e4560d499ac2c1c902ecce5db 100644 (file)
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
        return size;
 }
 
+static void hists__init(struct hists *hists)
+{
+       memset(hists, 0, sizeof(*hists));
+       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+       hists->entries_in = &hists->entries_in_array[0];
+       hists->entries_collapsed = RB_ROOT;
+       hists->entries = RB_ROOT;
+       pthread_mutex_init(&hists->lock, NULL);
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
index bcd05d05b4f01969906efe3dbfcc899ccdea554a..33c17a2b2a81e739066991a9a9facd2b38ee0416 100644 (file)
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, attr->name);
+               ret = do_write_string(fd, event_name(attr));
                if (ret < 0)
                        return ret;
                /*
index a36a3fa81ffba45ea6602145530289c1c326acbc..abef2703cd242eb8b8e5f1763cadd50286bdc8be 100644 (file)
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
 
        return ret;
 }
-
-void hists__init(struct hists *hists)
-{
-       memset(hists, 0, sizeof(*hists));
-       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
-       hists->entries_in = &hists->entries_in_array[0];
-       hists->entries_collapsed = RB_ROOT;
-       hists->entries = RB_ROOT;
-       pthread_mutex_init(&hists->lock, NULL);
-}
index c86c1d27bd1eca09cef6c00949293a251b345ef9..89289c8e935e78973a906fb96edd164973d20ac9 100644 (file)
@@ -63,8 +63,6 @@ struct hists {
        struct callchain_cursor callchain_cursor;
 };
 
-void hists__init(struct hists *hists);
-
 struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *parent, u64 period);
index 85c1e6b76f0a4bbdd3d2c5b9e0d7359733dbfda7..0f4555ce90635a767f4a609b917905b5f58bdd0a 100644 (file)
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
        }
 
        map = cpu_map__new(cpu_list);
+       if (map == NULL) {
+               pr_err("Invalid cpu_list\n");
+               return -1;
+       }
 
        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];
index 0a7ed5b5e281c88b321de87ced66a3d29ebb003d..6c164dc9ee957dbf3df642f712b2fdc1485d2dc6 100644 (file)
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;
 
index 30e2befd6f2a237b7a04bd1125386ffa26c7a3ac..8b4c2535b266a2abe17d3ad495b3a0fdfa8855a9 100755 (executable)
@@ -747,6 +747,18 @@ sub __eval_option {
     # Add space to evaluate the character before $
     $option = " $option";
     my $retval = "";
+    my $repeated = 0;
+    my $parent = 0;
+
+    foreach my $test (keys %repeat_tests) {
+       if ($i >= $test &&
+           $i < $test + $repeat_tests{$test}) {
+
+           $repeated = 1;
+           $parent = $test;
+           last;
+       }
+    }
 
     while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
        my $start = $1;
@@ -760,10 +772,14 @@ sub __eval_option {
        # otherwise see if the default OPT (without [$i]) exists.
 
        my $o = "$var\[$i\]";
+       my $parento = "$var\[$parent\]";
 
        if (defined($opt{$o})) {
            $o = $opt{$o};
            $retval = "$retval$o";
+       } elsif ($repeated && defined($opt{$parento})) {
+           $o = $opt{$parento};
+           $retval = "$retval$o";
        } elsif (defined($opt{$var})) {
            $o = $opt{$var};
            $retval = "$retval$o";
index 3ad0925d23a9c85e39b508021ef9fcf2bbb1ad81..758e3b36d4cfd525846a1968987d80bea8e4bcce 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ out:
        return r;
 }
 
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device.  To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs.  PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file.  We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+       int i;
+       bool bar_found = false;
+
+       for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+               char *kpath, *syspath;
+               struct path path;
+               struct inode *inode;
+               int r;
+
+               if (!pci_resource_len(dev, i))
+                       continue;
+
+               kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+               if (!kpath)
+                       return -ENOMEM;
+
+               /* Per sysfs-rules, sysfs is always at /sys */
+               syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+               kfree(kpath);
+               if (!syspath)
+                       return -ENOMEM;
+
+               r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+               kfree(syspath);
+               if (r)
+                       return r;
+
+               inode = path.dentry->d_inode;
+
+               r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+               path_put(&path);
+               if (r)
+                       return r;
+
+               bar_found = true;
+       }
+
+       /* If no resources, probably something special */
+       if (!bar_found)
+               return -EPERM;
+
+       return 0;
+#else
+       return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
 {
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
+       u8 header_type;
+
+       if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+               return -EINVAL;
 
        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                r = -EINVAL;
                goto out_free;
        }
+
+       /* Don't allow bridges to be assigned */
+       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+               r = -EPERM;
+               goto out_put;
+       }
+
+       r = probe_sysfs_permissions(dev);
+       if (r)
+               goto out_put;
+
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
-       if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-               if (!kvm->arch.iommu_domain) {
-                       r = kvm_iommu_map_guest(kvm);
-                       if (r)
-                               goto out_list_del;
-               }
-               r = kvm_assign_device(kvm, match);
+       if (!kvm->arch.iommu_domain) {
+               r = kvm_iommu_map_guest(kvm);
                if (r)
                        goto out_list_del;
        }
+       r = kvm_assign_device(kvm, match);
+       if (r)
+               goto out_list_del;
 
 out:
        srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                goto out;
        }
 
-       if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-               kvm_deassign_device(kvm, match);
+       kvm_deassign_device(kvm, match);
 
        kvm_free_assigned_device(kvm, match);