git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'asoc-v3.17-rc4' into asoc-linus
author Mark Brown <broonie@kernel.org>
Wed, 8 Oct 2014 15:44:36 +0000 (16:44 +0100)
committer Mark Brown <broonie@kernel.org>
Wed, 8 Oct 2014 15:44:36 +0000 (16:44 +0100)
ASoC: Fixes for v3.17

Again, another round of fairly unexciting fixes - several driver fixes,
an e-mail address change and a fix for error handling with DPCM.

# gpg: Signature made Wed 10 Sep 2014 12:26:54 BST using RSA key ID 5D5487D0
# gpg: Good signature from "Mark Brown <broonie@sirena.org.uk>"
# gpg:                 aka "Mark Brown <broonie@debian.org>"
# gpg:                 aka "Mark Brown <broonie@kernel.org>"
# gpg:                 aka "Mark Brown <broonie@tardis.ed.ac.uk>"
# gpg:                 aka "Mark Brown <broonie@linaro.org>"
# gpg:                 aka "Mark Brown <Mark.Brown@linaro.org>"

784 files changed:
Documentation/DocBook/media/v4l/compat.xml
Documentation/DocBook/media/v4l/func-poll.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
Documentation/cgroups/cpusets.txt
Documentation/devicetree/bindings/dma/rcar-audmapp.txt
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/sound/rockchip-i2s.txt
Documentation/devicetree/bindings/spi/spi-rockchip.txt
Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
Documentation/devicetree/bindings/usb/mxs-phy.txt
Documentation/devicetree/bindings/video/analog-tv-connector.txt
Documentation/devicetree/of_selftest.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/networking/filter.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/k2e-clocks.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/tls.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/xen/page-coherent.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/irq.c
arch/arm/kernel/kprobes-test.c
arch/arm/kernel/kprobes-test.h
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/process.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/thumbee.c
arch/arm/kernel/traps.c
arch/arm/lib/getuser.S
arch/arm/mach-imx/clk-gate2.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-pxa/generic.c
arch/arm/mm/alignment.c
arch/arm/mm/proc-v7-3level.S
arch/arm/plat-omap/Kconfig
arch/arm/xen/Makefile
arch/arm/xen/enlighten.c
arch/arm/xen/mm32.c [new file with mode: 0644]
arch/arm/xen/p2m.c
arch/arm64/kernel/irq.c
arch/arm64/kernel/process.c
arch/arm64/kernel/sys_compat.c
arch/arm64/mm/init.c
arch/ia64/configs/bigsur_defconfig
arch/ia64/configs/generic_defconfig
arch/ia64/configs/gensparse_defconfig
arch/ia64/configs/sim_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/pci/fixup.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/entry.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/include/asm/unistd.h
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/bcm63xx/irq.c
arch/mips/boot/compressed/decompress.c
arch/mips/configs/gpr_defconfig
arch/mips/configs/ip27_defconfig
arch/mips/configs/jazz_defconfig
arch/mips/configs/loongson3_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig
arch/mips/configs/malta_kvm_guest_defconfig
arch/mips/configs/mtx1_defconfig
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/include/asm/cop2.h
arch/mips/include/asm/mach-ip28/spaces.h
arch/mips/include/asm/page.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/topology.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/machine_kexec.c
arch/mips/kernel/mcount.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/math-emu/cp1emu.c
arch/mips/mm/init.c
arch/mips/net/bpf_jit.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/configs/a500_defconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/hpux/sys_hpux.c
arch/parisc/include/asm/seccomp.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/powerpc/configs/c2k_defconfig
arch/powerpc/configs/cell_defconfig
arch/powerpc/configs/celleb_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/maple_defconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/configs/pmac32_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/perf/callchain.c
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/include/asm/ipl.h
arch/s390/kernel/ipl.c
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/mm/init.c
arch/sh/configs/sdk7780_defconfig
arch/sh/configs/sh2007_defconfig
arch/sh/mm/gup.c
arch/sparc/configs/sparc64_defconfig
arch/sparc/net/bpf_jit_asm.S
arch/sparc/net/bpf_jit_comp.c
arch/x86/Kconfig
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/aslr.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/smpboot.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/mmap.c
arch/x86/pci/fixup.c
arch/x86/xen/mmu.c
block/blk-exec.c
block/blk-merge.c
block/blk-mq.c
block/blk-sysfs.c
block/genhd.c
block/partition-generic.c
crypto/drbg.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exprep.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/acpi/container.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/ahci_tegra.c
drivers/ata/ahci_xgene.c
drivers/ata/ata_piix.c
drivers/ata/pata_jmicron.c
drivers/base/regmap/regmap-debugfs.c
drivers/bcma/host_pci.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/bus/omap_l3_noc.h
drivers/char/hw_random/virtio-rng.c
drivers/clk/at91/clk-slow.c
drivers/clk/clk-efm32gg.c
drivers/clk/clk.c
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/divider.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_opp.c
drivers/cpufreq/integrator-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/crypto/ccp/ccp-crypto-main.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/dma/dma-jz4740.c
drivers/dma/omap-dma.c
drivers/firmware/efi/Makefile
drivers/firmware/efi/libstub/fdt.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/vga/vga_switcheroo.c
drivers/gpu/vga/vgaarb.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/tmp103.c
drivers/i2c/Makefile
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-acpi.c [deleted file]
drivers/i2c/i2c-core.c
drivers/iio/accel/bma180.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
drivers/iio/inkern.c
drivers/iio/magnetometer/st_magn_core.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cap1106.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/mouse/synaptics_usb.c
drivers/input/mouse/trackpoint.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/serio/serport.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/wm9712.c
drivers/input/touchscreen/wm9713.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/iommu.c
drivers/irqchip/exynos-combiner.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/md/dm-cache-target.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/media/Kconfig
drivers/media/common/cx2341x.c
drivers/media/dvb-core/dvb-usb-ids.h
drivers/media/dvb-frontends/af9033.c
drivers/media/dvb-frontends/af9033_priv.h
drivers/media/dvb-frontends/cx24123.c
drivers/media/i2c/adv7604.c
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/pci/cx18/cx18-driver.c
drivers/media/radio/radio-miropcm20.c
drivers/media/tuners/tuner_it913x.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/em28xx/em28xx-cards.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/em28xx/em28xx.h
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-dma-sg.c
drivers/message/fusion/Kconfig
drivers/misc/lattice-ecp3-config.c
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/aeroflex/greth.h
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/fddi/skfp/h/skfbi.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/usb/r8152.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath9k/common-beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/spectral.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/xen-netback/interface.c
drivers/nfc/microread/microread.c
drivers/nfc/st21nfca/Makefile
drivers/nfc/st21nfcb/Makefile
drivers/ntb/ntb_transport.c
drivers/of/base.c
drivers/of/dynamic.c
drivers/of/fdt.c
drivers/parisc/dino.c
drivers/parisc/pdc_stable.c
drivers/parisc/superio.c
drivers/pci/host/pci-imx6.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/pcihp_slot.c
drivers/pci/probe.c
drivers/phy/Kconfig
drivers/phy/phy-exynos5-usbdrd.c
drivers/phy/phy-miphy365x.c
drivers/phy/phy-twl4030-usb.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/regulator/88pm8607.c
drivers/regulator/da9052-regulator.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8997.c
drivers/regulator/palmas-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/rtc/rtc-efi.c
drivers/s390/block/dasd_devmap.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_sys.c
drivers/scsi/Kconfig
drivers/scsi/bnx2fc/Kconfig
drivers/scsi/bnx2i/Kconfig
drivers/scsi/csiostor/Kconfig
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/scsi_lib.c
drivers/soc/qcom/qcom_gsbi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dw.c
drivers/spi/spi-fsl-espi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-pl022.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-sirf.c
drivers/ssb/b43_pci_bridge.c
drivers/staging/android/sync.c
drivers/staging/iio/meter/ade7758_trigger.c
drivers/staging/imx-drm/imx-ldb.c
drivers/staging/imx-drm/ipuv3-plane.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/vt6655/hostap.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_configfs.c
drivers/target/target_core_spc.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/core/hub.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_fs.h
drivers/usb/gadget/udc/fusb300_udc.h
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/sierra.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/uwb/lc-dev.c
drivers/video/fbdev/amba-clcd.c
drivers/xen/balloon.c
drivers/xen/gntalloc.c
drivers/xen/manage.c
fs/btrfs/btrfs_inode.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/buffer.c
fs/cachefiles/bind.c
fs/cachefiles/daemon.c
fs/cachefiles/internal.h
fs/cachefiles/main.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/cachefiles/xattr.c
fs/cifs/Kconfig
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2maperror.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/direct-io.c
fs/eventpoll.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/fscache/object.c
fs/fscache/page.c
fs/fuse/file.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/super.c
fs/lockd/svc.c
fs/namei.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nilfs2/inode.c
fs/notify/fdinfo.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/super.c
fs/proc/task_mmu.c
fs/udf/ialloc.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/ufs/ialloc.c
fs/ufs/namei.c
include/acpi/acpi_bus.h
include/crypto/drbg.h
include/linux/ccp.h
include/linux/cpuset.h
include/linux/dcache.h
include/linux/hash.h
include/linux/i2c.h
include/linux/iio/trigger.h
include/linux/jiffies.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/pci.h
include/linux/percpu-refcount.h
include/linux/sched.h
include/linux/uio.h
include/linux/vga_switcheroo.h
include/linux/vgaarb.h
include/linux/workqueue.h
include/media/videobuf2-core.h
include/net/addrconf.h
include/net/bluetooth/hci_core.h
include/net/dst.h
include/net/genetlink.h
include/net/ip6_fib.h
include/net/net_namespace.h
include/net/netns/ieee802154_6lowpan.h
include/net/regulatory.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/wimax.h
include/rdma/ib_umem.h
include/scsi/scsi_tcq.h
include/trace/events/irq.h
include/uapi/linux/Kbuild
include/uapi/linux/input.h
include/xen/interface/features.h
init/Kconfig
init/do_mounts.c
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/fork.c
kernel/futex.c
kernel/kcmp.c
kernel/power/snapshot.c
kernel/printk/printk.c
kernel/time/alarmtimer.c
kernel/time/time.c
kernel/trace/ring_buffer.c
lib/Kconfig
lib/assoc_array.c
lib/genalloc.c
lib/hweight.c
lib/percpu-refcount.c
lib/rhashtable.c
lib/string.c
mm/dmapool.c
mm/huge_memory.c
mm/iov_iter.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mmap.c
mm/nobootmem.c
mm/page_alloc.c
mm/percpu-vm.c
mm/percpu.c
mm/shmem.c
mm/slab.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/datagram.c
net/core/dev.c
net/core/gen_estimator.c
net/core/gen_stats.c
net/core/skbuff.c
net/core/sock.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/reassembly.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/route.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/anycast.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/mcast.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/route.c
net/l2tp/l2tp_ppp.c
net/mac80211/chan.c
net/mac80211/debugfs_sta.c
net/mac80211/iface.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/sta_info.c
net/mac802154/wpan.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nfnetlink.c
net/netfilter/nft_hash.c
net/netfilter/nft_rbtree.c
net/netfilter/xt_cgroup.c
net/openvswitch/datapath.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-key.c
net/sched/ematch.c
net/sched/sch_choke.c
net/sctp/socket.c
net/socket.c
net/wireless/nl80211.c
net/xfrm/xfrm_policy.c
scripts/checkpatch.pl
scripts/tags.sh
sound/core/pcm_lib.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/rt286.c
sound/soc/codecs/ssm2602.c
sound/soc/fsl/fsl_ssi.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/usb/caiaq/control.c
tools/usb/usbip/libsrc/usbip_common.h
virt/kvm/arm/vgic-v2.c
virt/kvm/kvm_main.c

index eee6f0f4aa433cb535d6375bf307273a8e3d1c22..3a626d1b8f2e4d8022831f0961fbb83bc359b487 100644 (file)
@@ -2545,6 +2545,30 @@ fields changed from _s32 to _u32.
       </orderedlist>
     </section>
 
+    <section>
+      <title>V4L2 in Linux 3.16</title>
+      <orderedlist>
+        <listitem>
+         <para>Added event V4L2_EVENT_SOURCE_CHANGE.
+         </para>
+        </listitem>
+      </orderedlist>
+    </section>
+
+    <section>
+      <title>V4L2 in Linux 3.17</title>
+      <orderedlist>
+        <listitem>
+         <para>Extended &v4l2-pix-format;. Added format flags.
+         </para>
+        </listitem>
+        <listitem>
+         <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
+         </para>
+        </listitem>
+      </orderedlist>
+    </section>
+
     <section id="other">
       <title>Relation of V4L2 to other Linux multimedia APIs</title>
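
The changelog entries added above mention the new compound control types and
VIDIOC_QUERY_EXT_CTRL. As a rough userspace illustration (not part of this
commit), enumerating every control, including compound ones, might look like
the sketch below; "/dev/video0" is an assumed placeholder device:

/* Sketch only: enumerate all controls, including compound types. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_query_ext_ctrl qec;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&qec, 0, sizeof(qec));
	/* Ask for the next control, including compound (non-scalar) types */
	qec.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	while (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qec) == 0) {
		printf("0x%08x %-32s type=%u elems=%u\n",
		       qec.id, qec.name, qec.type, qec.elems);
		qec.id |= V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	}
	close(fd);
	return 0;
}
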
 
index 85cad8bff5ba79aecec18c721650d5994137bbc7..4c73f115219b5f4f923250e83dac3f001cbfc094 100644 (file)
@@ -29,9 +29,12 @@ can suspend execution until the driver has captured data or is ready
 to accept data for output.</para>
 
     <para>When streaming I/O has been negotiated this function waits
-until a buffer has been filled or displayed and can be dequeued with
-the &VIDIOC-DQBUF; ioctl. When buffers are already in the outgoing
-queue of the driver the function returns immediately.</para>
+until a buffer has been filled by the capture device and can be dequeued
+with the &VIDIOC-DQBUF; ioctl. For output devices this function waits
+until the device is ready to accept a new buffer to be queued up with
+the &VIDIOC-QBUF; ioctl for display. When buffers are already in the outgoing
+queue of the driver (capture) or the incoming queue isn't full (display)
+the function returns immediately.</para>
 
     <para>On success <function>poll()</function> returns the number of
 file descriptors that have been selected (that is, file descriptors
@@ -44,10 +47,22 @@ Capture devices set the <constant>POLLIN</constant> and
 flags. When the function timed out it returns a value of zero, on
 failure it returns <returnvalue>-1</returnvalue> and the
 <varname>errno</varname> variable is set appropriately. When the
-application did not call &VIDIOC-QBUF; or &VIDIOC-STREAMON; yet the
+application did not call &VIDIOC-STREAMON; the
 <function>poll()</function> function succeeds, but sets the
 <constant>POLLERR</constant> flag in the
-<structfield>revents</structfield> field.</para>
+<structfield>revents</structfield> field. When the
+application has called &VIDIOC-STREAMON; for a capture device but hasn't
+yet called &VIDIOC-QBUF;, the <function>poll()</function> function
+succeeds and sets the <constant>POLLERR</constant> flag in the
+<structfield>revents</structfield> field. For output devices this
+same situation will cause <function>poll()</function> to succeed
+as well, but it sets the <constant>POLLOUT</constant> and
+<constant>POLLWRNORM</constant> flags in the <structfield>revents</structfield>
+field.</para>
+
+    <para>If an event occurred (see &VIDIOC-DQEVENT;) then
+<constant>POLLPRI</constant> will be set in the <structfield>revents</structfield>
+field and <function>poll()</function> will return.</para>
 
     <para>When use of the <function>read()</function> function has
 been negotiated and the driver does not capture yet, the
@@ -58,10 +73,18 @@ continuously (as opposed to, for example, still images) the function
 may return immediately.</para>
 
     <para>When use of the <function>write()</function> function has
-been negotiated the <function>poll</function> function just waits
+been negotiated and the driver does not stream yet, the
+<function>poll</function> function starts streaming. When that fails
+it returns a <constant>POLLERR</constant> as above. Otherwise it waits
 until the driver is ready for a non-blocking
 <function>write()</function> call.</para>
 
+    <para>If the caller is only interested in events (just
+<constant>POLLPRI</constant> is set in the <structfield>events</structfield>
+field), then <function>poll()</function> will <emphasis>not</emphasis>
+start streaming if the driver does not stream yet. This makes it
+possible to just poll for events and not for buffers.</para>
+
     <para>All drivers implementing the <function>read()</function> or
 <function>write()</function> function or streaming I/O must also
 support the <function>poll()</function> function.</para>
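
As a rough userspace illustration of the poll() semantics documented above
(not part of this commit), the sketch below assumes a capture fd that is
already streaming, i.e. VIDIOC_STREAMON has been issued and buffers have been
queued with VIDIOC_QBUF:

/* Sketch only: wait for a filled buffer or a pending event. */
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_frame_or_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
	int ret = poll(&pfd, 1, 2000);          /* 2 second timeout */

	if (ret <= 0)
		return ret;                     /* 0 = timeout, <0 = error */
	if (pfd.revents & POLLERR) {
		/* e.g. streaming not started or no buffers queued, see above */
		fprintf(stderr, "poll: POLLERR\n");
		return -1;
	}
	if (pfd.revents & POLLPRI) {
		struct v4l2_event ev;
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
			printf("event %u pending\n", ev.type);
	}
	if (pfd.revents & POLLIN)
		printf("a filled buffer can be dequeued with VIDIOC_DQBUF\n");
	return ret;
}
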
index f2f81f06a17ba54718b693c54d024a6640576a5f..7cfe618f754d35fecfb9c55e7ef3efad72c539f4 100644 (file)
@@ -152,10 +152,11 @@ structs, ioctls) must be noted in more detail in the history chapter
 applications. -->
 
       <revision>
-       <revnumber>3.16</revnumber>
-       <date>2014-05-27</date>
-       <authorinitials>lp</authorinitials>
-       <revremark>Extended &v4l2-pix-format;. Added format flags.
+       <revnumber>3.17</revnumber>
+       <date>2014-08-04</date>
+       <authorinitials>lp, hv</authorinitials>
+       <revremark>Extended &v4l2-pix-format;. Added format flags. Added compound control types
+and VIDIOC_QUERY_EXT_CTRL.
        </revremark>
       </revision>
 
@@ -538,7 +539,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.14</subtitle>
+ <subtitle>Revision 3.17</subtitle>
 
   <chapter id="common">
     &sub-common;
index 1ba9e999af3fec1bed5a3a116f37ac0358caf4e0..c62a7360719b56c32d227717cfc93a82e9ca30cc 100644 (file)
          </row>
          <row>
            <entry>&v4l2-rect;</entry>
-           <entry><structfield>rect</structfield></entry>
+           <entry><structfield>r</structfield></entry>
            <entry>Selection rectangle, in pixels.</entry>
          </row>
          <row>
index 7740038d82bcb5669ab946ab6fa580b4920d0041..3c94ff3f9693c4fa876ddf47948a02258e2f2470 100644 (file)
@@ -345,14 +345,14 @@ the named feature on.
 The implementation is simple.
 
 Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag
-PF_SPREAD_PAGE for each task that is in that cpuset or subsequently
+PFA_SPREAD_PAGE for each task that is in that cpuset or subsequently
 joins that cpuset.  The page allocation calls for the page cache
-is modified to perform an inline check for this PF_SPREAD_PAGE task
+is modified to perform an inline check for this PFA_SPREAD_PAGE task
 flag, and if set, a call to a new routine cpuset_mem_spread_node()
 returns the node to prefer for the allocation.
 
 Similarly, setting 'cpuset.memory_spread_slab' turns on the flag
-PF_SPREAD_SLAB, and appropriately marked slab caches will allocate
+PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate
 pages from the node returned by cpuset_mem_spread_node().
 
 The cpuset_mem_spread_node() routine is also simple.  It uses the
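
The hunk above renames PF_SPREAD_PAGE/PF_SPREAD_SLAB to their PFA_ equivalents;
the allocation pattern it describes looks roughly like the following
simplified, kernel-style sketch (modelled loosely on the page cache allocation
path, not code from this commit):

#include <linux/cpuset.h>
#include <linux/gfp.h>

static struct page *spread_aware_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		/* Task has PFA_SPREAD_PAGE set: pick the spread node */
		int node = cpuset_mem_spread_node();

		return alloc_pages_exact_node(node, gfp, 0);
	}
	/* Otherwise keep the usual local-node preference */
	return alloc_pages(gfp, 0);
}
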
index 9f1d750d76de9ff0d9858366e037a9635f353087..61bca509d7b99f734d1c7f054587aaa29dff8261 100644 (file)
@@ -16,9 +16,9 @@ Example:
 * DMA client
 
 Required properties:
-- dmas:                a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
-               where SRS/DRS values are fixed handles, specified in the SoC
-               manual as the value that would be written into the PDMACHCR.
+- dmas:                a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs,
+               where the SRS/DRS request select values are specified in the SoC
+               manual. The combined value is written into the upper 16 bits of PDMACHCR.
 - dma-names:   a list of DMA channel names, one per "dmas" entry
 
 Example:
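
The binding's own example is cut off by the diff context above. As a rough
illustration of the encoding (not part of this commit or the binding), the
second cell of each "dmas" entry is simply (SRS << 8) | DRS; the SRS/DRS
values below are made-up placeholders, not taken from any SoC manual:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t srs = 0x20;               /* hypothetical source request select */
	uint32_t drs = 0x22;               /* hypothetical destination request select */
	uint32_t cell = (srs << 8) | drs;  /* second cell of the dmas entry */

	printf("second dmas cell = 0x%04x\n", (unsigned int)cell);   /* 0x2022 */
	return 0;
}
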
index 0ac23f2ed1040c2c9f820677fdf3f5092a7c1a6d..1852906517ab1c1b691fb4df75f8b5bdd7c9c78c 100644 (file)
@@ -11,10 +11,6 @@ Required properties:
 
 Optional properties for main touchpad device:
 
-- linux,gpio-keymap: An array of up to 4 entries indicating the Linux
-    keycode generated by each GPIO. Linux keycodes are defined in
-    <dt-bindings/input/input.h>.
-
 - linux,gpio-keymap: When enabled, the SPT_GPIOPWN_T19 object sends messages
     on GPIO bit changes. An array of up to 8 entries can be provided
     indicating the Linux keycode mapped to each bit of the status byte,
index 9b03c57563a49d2145fb47c2a5086ff71506fe3f..e45ac3f926b164636b73e4280cf40ddfde314203 100644 (file)
@@ -39,6 +39,10 @@ Optional properties:
   further clocks may be specified in derived bindings.
 - clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
+- clk_ptp_ref: the PTP reference clock; if PTP is available, this clock is
+  used for programming the Timestamp Addend Register. If it is not passed,
+  the system clock will be used instead, and this is fine on some
+  platforms.
 
 Examples:
 
index 6c55fcfe5e1d095f5b3303df5351c99a7319d86b..9b82c20b306bb1e10e6bf8be84dc2a9c97933438 100644 (file)
@@ -31,7 +31,7 @@ i2s@ff890000 {
        #address-cells = <1>;
        #size-cells = <0>;
        dmas = <&pdma1 0>, <&pdma1 1>;
-       dma-names = "rx", "tx";
+       dma-names = "tx", "rx";
        clock-names = "i2s_hclk", "i2s_clk";
        clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
 };
index 7bab355758173f428ef256e73bd06f2ac6221f8e..467dec441c62a545f3d011f45c8707f35287ded4 100644 (file)
@@ -16,11 +16,15 @@ Required Properties:
 - clocks: Must contain an entry for each entry in clock-names.
 - clock-names: Shall be "spiclk" for the transfer-clock, and "apb_pclk" for
                           the peripheral clock.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+Optional Properties:
+
 - dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
                Documentation/devicetree/bindings/dma/dma.txt
 - dma-names: DMA request names should include "tx" and "rx" if present.
-- #address-cells: should be 1.
-- #size-cells: should be 0.
+
 
 Example:
 
index 578a1fca366e67b8e71f7c61daf7f07979062364..443bcb6134d53ca6afc8587b611a82ec30e89d22 100644 (file)
@@ -56,6 +56,9 @@ Required properties:
  - fsl,data-width : should be <18> or <24>
  - port: A port node with endpoint definitions as defined in
    Documentation/devicetree/bindings/media/video-interfaces.txt.
+   On i.MX5, the internal two-input-multiplexer is used.
+   Due to hardware limitations, only one port (port@[0,1])
+   can be used for each channel (lvds-channel@[0,1], respectively).
    On i.MX6, there should be four ports (port@[0-3]) that correspond
    to the four LVDS multiplexer inputs.
 
@@ -78,6 +81,8 @@ ldb: ldb@53fa8008 {
                      "di0", "di1";
 
        lvds-channel@0 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                reg = <0>;
                fsl,data-mapping = "spwg";
                fsl,data-width = <24>;
@@ -86,7 +91,9 @@ ldb: ldb@53fa8008 {
                        /* ... */
                };
 
-               port {
+               port@0 {
+                       reg = <0>;
+
                        lvds0_in: endpoint {
                                remote-endpoint = <&ipu_di0_lvds0>;
                        };
@@ -94,6 +101,8 @@ ldb: ldb@53fa8008 {
        };
 
        lvds-channel@1 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                reg = <1>;
                fsl,data-mapping = "spwg";
                fsl,data-width = <24>;
@@ -102,7 +111,9 @@ ldb: ldb@53fa8008 {
                        /* ... */
                };
 
-               port {
+               port@1 {
+                       reg = <1>;
+
                        lvds1_in: endpoint {
                                remote-endpoint = <&ipu_di1_lvds1>;
                        };
index cef181a9d8bd6506699cd98889fd36f8f6bbc306..96681c93b86df38788327df42c543738293265ea 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        * "fsl,imx23-usbphy" for imx23 and imx28
        * "fsl,imx6q-usbphy" for imx6dq and imx6dl
        * "fsl,imx6sl-usbphy" for imx6sl
+       * "fsl,imx6sx-usbphy" for imx6sx
   "fsl,imx23-usbphy" is still a fallback for other strings
 - reg: Should contain registers location and length
 - interrupts: Should contain phy interrupt
index 0218fcdc12994ea7f80ee1ce6246fde733a4b1a3..0c0970c210ab7be0c2a52ba4a977a0de8ddd0d5a 100644 (file)
@@ -2,7 +2,7 @@ Analog TV Connector
 ===================
 
 Required properties:
-- compatible: "composite-connector" or "svideo-connector"
+- compatible: "composite-video-connector" or "svideo-connector"
 
 Optional properties:
 - label: a symbolic name for the connector
@@ -14,7 +14,7 @@ Example
 -------
 
 tv: connector {
-       compatible = "composite-connector";
+       compatible = "composite-video-connector";
        label = "tv";
 
        port {
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
new file mode 100644 (file)
index 0000000..3a2f54d
--- /dev/null
@@ -0,0 +1,211 @@
+Open Firmware Device Tree Selftest
+----------------------------------
+
+Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
+
+1. Introduction
+
+This document explains how the test data required for executing OF selftest
+is attached to the live tree dynamically, independent of the machine's
+architecture.
+
+It is recommended to read the following documents before moving ahead.
+
+[1] Documentation/devicetree/usage-model.txt
+[2] http://www.devicetree.org/Device_Tree_Usage
+
+OF Selftest has been designed to test the interface (include/linux/of.h)
+provided to device driver developers to fetch device information, etc.,
+from the unflattened device tree data structure. This interface is used by
+most of the device drivers in various use cases.
+
+
+2. Test-data
+
+The Device Tree Source file (drivers/of/testcase-data/testcases.dts) contains
+the test data required for executing the unit tests automated in
+drivers/of/selftests.c. Currently, the following Device Tree Source Include files
+(.dtsi) are included in testcase.dts:
+
+drivers/of/testcase-data/tests-interrupts.dtsi
+drivers/of/testcase-data/tests-platform.dtsi
+drivers/of/testcase-data/tests-phandle.dtsi
+drivers/of/testcase-data/tests-match.dtsi
+
+When the kernel is built with OF_SELFTEST enabled, the following make rule
+
+$(obj)/%.dtb: $(src)/%.dts FORCE
+       $(call if_changed_dep, dtc)
+
+is used to compile the DT source file (testcase.dts) into a binary blob
+(testcase.dtb), also referred to as a flattened DT.
+
+After that, using the following rule the binary blob above is wrapped as an
+assembly file (testcase.dtb.S).
+
+$(obj)/%.dtb.S: $(obj)/%.dtb
+       $(call cmd, dt_S_dtb)
+
+The assembly file is compiled into an object file (testcase.dtb.o), and is
+linked into the kernel image.
+
+
+2.1. Adding the test data
+
+Un-flattened device tree structure:
+
+An un-flattened device tree consists of connected device_node(s) in the form of
+a tree structure, described below.
+
+// following struct members are used to construct the tree
+struct device_node {
+    ...
+    struct  device_node *parent;
+    struct  device_node *child;
+    struct  device_node *sibling;
+    struct  device_node *allnext;   /* next in list of all nodes */
+    ...
+ };
+
+Figure 1 describes the generic structure of a machine’s un-flattened device tree
+considering only child and sibling pointers. There exists another pointer,
+*parent, that is used to traverse the tree in the reverse direction. So, at
+a particular level the child node and all the sibling nodes will have a parent
+pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4’s
+parent points to the root node).
+
+root (‘/’)
+   |
+child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |         |           |           |
+   |         |           |          null
+   |         |           |
+   |         |        child31 -> sibling32 -> null
+   |         |           |          |
+   |         |          null       null
+   |         |
+   |      child21 -> sibling22 -> sibling23 -> null
+   |         |          |            |
+   |        null       null         null
+   |
+child11 -> sibling12 -> sibling13 -> sibling14 -> null
+   |           |           |            |
+   |           |           |           null
+   |           |           |
+  null        null       child131 -> null
+                           |
+                          null
+
+Figure 1: Generic structure of un-flattened device tree
+
+
+*allnext: it is used to link all the nodes of the DT into a list. So, for the
+above tree the list would be as follows:
+
+root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2->
+child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null
+
+Before executing the OF selftest, the test data must be attached to the
+machine's device tree (if present). So, when selftest_data_add() is called,
+it first reads the flattened device tree data linked into the kernel image
+via the following kernel symbols:
+
+__dtb_testcases_begin - address marking the start of test data blob
+__dtb_testcases_end   - address marking the end of test data blob
+
+Secondly, it calls of_fdt_unflatten_device_tree() to unflatten the flattened
+blob. And finally, if the machine’s device tree (i.e. the live tree) is present,
+then it attaches the unflattened test data tree to the live tree, else it
+attaches itself as a live device tree.
+
+attach_node_and_children() uses of_attach_node() to attach the nodes into the
+live tree as explained below. As an illustration, the test data tree described
+in Figure 2 is attached to the live tree described in Figure 1.
+
+root (‘/’)
+    |
+ testcase-data
+    |
+ test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null
+    |               |                |                |
+ test-child01      null             null             null
+
+
+allnext list:
+
+root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
+->test-sibling3->null
+
+Figure 2: Example test data tree to be attached to live tree.
+
+According to the scenario above, the live tree is already present so it isn’t
+required to attach the root(‘/’) node. All other nodes are attached by calling
+of_attach_node() on each node.
+
+In the function of_attach_node(), the new node is attached as the child of the
+given parent in the live tree. But if the parent already has a child, the new
+node replaces the current child and turns it into its sibling. So, when the
+testcase data node is attached to the live tree above (Figure 1), the final
+structure is as shown in Figure 3.
+
+root (‘/’)
+   |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |               |          |           |           |
+ (...)             |          |           |          null
+                   |          |         child31 -> sibling32 -> null
+                   |          |           |           |
+                   |          |          null        null
+                   |          |
+                   |        child21 -> sibling22 -> sibling23 -> null
+                   |          |           |            |
+                   |         null        null         null
+                   |
+                child11 -> sibling12 -> sibling13 -> sibling14 -> null
+                   |          |            |            |
+                  null       null          |           null
+                                           |
+                                        child131 -> null
+                                           |
+                                          null
+-----------------------------------------------------------------------
+
+root (‘/’)
+   |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |               |          |           |           |
+   |             (...)      (...)       (...)        null
+   |
+test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null
+   |                |                   |                |
+  null             null                null         test-child01
+
+
+Figure 3: Live device tree structure after attaching the testcase-data.
+
+
+Astute readers will have noticed that the test-child0 node becomes the last
+sibling compared to the earlier structure (Figure 2). After test-child0 is
+attached first, test-sibling1 is attached, which pushes the child node
+(i.e. test-child0) down to become a sibling and makes itself the child node,
+as mentioned above.
+
+If a duplicate node is found (i.e. a node with the same full_name property is
+already present in the live tree), then the node isn’t attached; rather, its
+properties are updated in the live tree’s node by calling the function
+update_node_properties().
+
+
+2.2. Removing the test data
+
+Once the test case execution is complete, selftest_data_remove() is called to
+remove the device nodes attached initially (first the leaf nodes are detached,
+then, moving up, the parent nodes are removed, and eventually the whole tree
+is gone). selftest_data_remove() calls detach_node_and_children(), which uses
+of_detach_node() to detach the nodes from the live device tree.
+
+To detach a node, of_detach_node() first updates the allnext linked list by
+pointing the previous node’s allnext at the detached node’s allnext. It then
+either updates the child pointer of the given node’s parent to its sibling, or
+attaches the previous sibling to the given node’s sibling, as appropriate.
+That is it :)
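
To make the child/sibling walk described in this document concrete, the
following simplified, kernel-style sketch (not the actual drivers/of/ selftest
code) visits a node and all of its descendants:

#include <linux/of.h>
#include <linux/printk.h>

static void walk_node_and_children(struct device_node *np, int depth)
{
	struct device_node *child;

	pr_info("%*s%s\n", depth * 2, "", of_node_full_name(np));

	/* for_each_child_of_node() follows np->child, then the ->sibling chain */
	for_each_child_of_node(np, child)
		walk_node_and_children(child, depth + 1);
}
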
index 5ae8608ca9f58a6331cae454ae7a0c84ab2f6c47..10d51c2f10d712b179f5e6f67d0b9032bb42b75c 100644 (file)
@@ -3541,6 +3541,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        bogus residue values);
                                s = SINGLE_LUN (the device has only one
                                        Logical Unit);
+                               u = IGNORE_UAS (don't bind to the uas driver);
                                w = NO_WP_DETECT (don't test whether the
                                        medium is write-protected).
                        Example: quirks=0419:aaf5:rl,0421:0433:rc
index c48a9704bda8eaae5851bf54a0b0277cd01c88b4..d16f424c5e8d1e6484958401e6b8afec30803064 100644 (file)
@@ -462,9 +462,9 @@ JIT compiler
 ------------
 
 The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
-transparently invoked for each attached filter from user space or for internal
-kernel users if it has been previously enabled by root:
+ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
+is transparently invoked for each attached filter from user space or for
+internal kernel users if it has been previously enabled by root:
 
   echo 1 > /proc/sys/net/core/bpf_jit_enable
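
With the sysctl above enabled, classic BPF filters attached from user space are
JIT-compiled on the supported architectures. A minimal userspace sketch (not
part of this commit) attaching a trivial "accept everything" filter to a packet
socket:

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

int main(void)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),   /* accept up to 0xffff bytes */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));  /* needs CAP_NET_RAW */

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
		perror("socket/setsockopt");
	return 0;
}
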
 
index 5e7866a486b0c3310b64ca5c9aa6cd6d9c16d354..f10ed3914ea85da75f4f61c86533dd04abcfce5e 100644 (file)
@@ -1665,6 +1665,12 @@ M:       Nicolas Ferre <nicolas.ferre@atmel.com>
 S:     Supported
 F:     drivers/tty/serial/atmel_serial.c
 
+ATMEL Audio ALSA driver
+M:     Bo Shen <voice.shen@atmel.com>
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:     Supported
+F:     sound/soc/atmel
+
 ATMEL DMA DRIVER
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2098,7 +2104,7 @@ S:        Supported
 F:     drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:     Rasesh Mody <rmody@brocade.com>
+M:     Rasesh Mody <rasesh.mody@qlogic.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/brocade/bna/
@@ -3012,9 +3018,8 @@ S:        Supported
 F:     drivers/acpi/dock.c
 
 DOCUMENTATION
-M:     Randy Dunlap <rdunlap@infradead.org>
+M:     Jiri Kosina <jkosina@suse.cz>
 L:     linux-doc@vger.kernel.org
-T:     quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:     Maintained
 F:     Documentation/
 X:     Documentation/ABI/
@@ -4477,7 +4482,6 @@ M:        Mika Westerberg <mika.westerberg@linux.intel.com>
 L:     linux-i2c@vger.kernel.org
 L:     linux-acpi@vger.kernel.org
 S:     Maintained
-F:     drivers/i2c/i2c-acpi.c
 
 I2C-TAOS-EVM DRIVER
 M:     Jean Delvare <jdelvare@suse.de>
@@ -5480,7 +5484,7 @@ F:        drivers/macintosh/
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:     Anatolij Gustschin <agust@denx.de>
 L:     linuxppc-dev@lists.ozlabs.org
-T:     git git://git.denx.de/linux-2.6-agust.git
+T:     git git://git.denx.de/linux-denx-agust.git
 S:     Maintained
 F:     arch/powerpc/platforms/512x/
 F:     arch/powerpc/platforms/52xx/
@@ -6424,7 +6428,8 @@ F:        Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
 NTB DRIVER
-M:     Jon Mason <jon.mason@intel.com>
+M:     Jon Mason <jdmason@kudzu.us>
+M:     Dave Jiang <dave.jiang@intel.com>
 S:     Supported
 W:     https://github.com/jonmason/ntb/wiki
 T:     git git://github.com/jonmason/ntb.git
@@ -6875,7 +6880,7 @@ F:        arch/x86/kernel/quirks.c
 
 PCI DRIVER FOR IMX6
 M:     Richard Zhu <r65037@freescale.com>
-M:     Shawn Guo <shawn.guo@freescale.com>
+M:     Lucas Stach <l.stach@pengutronix.de>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -7053,7 +7058,7 @@ S:        Maintained
 F:     drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7899,7 +7904,8 @@ S:        Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG SOC CLOCK DRIVERS
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
@@ -7912,6 +7918,19 @@ S:       Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG USB2 PHY DRIVER
+M:     Kamil Debski <k.debski@samsung.com>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/phy/samsung-phy.txt
+F:     Documentation/phy/samsung-usb2.txt
+F:     drivers/phy/phy-exynos4210-usb2.c
+F:     drivers/phy/phy-exynos4x12-usb2.c
+F:     drivers/phy/phy-exynos5250-usb2.c
+F:     drivers/phy/phy-s5pv210-usb2.c
+F:     drivers/phy/phy-samsung-usb2.c
+F:     drivers/phy/phy-samsung-usb2.h
+
 SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
index 1a60bdd05c9a1611f3cee75f1073354d5a6297d0..b77de27e58fc6ecbec9aa292d6a36fc1433dcd2a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index e03fbf3c6889120e9fce89d0fdb39fb70fe7b222..b40cdadb1f87b1057fc52cbdd1257ab0e7f1a429 100644 (file)
                gpmc,device-width = <2>;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <40>;
-               gpmc,cs-wr-off-ns = <40>;
+               gpmc,cs-rd-off-ns = <80>;
+               gpmc,cs-wr-off-ns = <80>;
                gpmc,adv-on-ns = <0>;
-               gpmc,adv-rd-off-ns = <30>;
-               gpmc,adv-wr-off-ns = <30>;
-               gpmc,we-on-ns = <5>;
-               gpmc,we-off-ns = <25>;
-               gpmc,oe-on-ns = <2>;
-               gpmc,oe-off-ns = <20>;
-               gpmc,access-ns = <20>;
-               gpmc,wr-access-ns = <40>;
-               gpmc,rd-cycle-ns = <40>;
-               gpmc,wr-cycle-ns = <40>;
-               gpmc,wait-pin = <0>;
-               gpmc,wait-on-read;
-               gpmc,wait-on-write;
+               gpmc,adv-rd-off-ns = <60>;
+               gpmc,adv-wr-off-ns = <60>;
+               gpmc,we-on-ns = <10>;
+               gpmc,we-off-ns = <50>;
+               gpmc,oe-on-ns = <4>;
+               gpmc,oe-off-ns = <40>;
+               gpmc,access-ns = <40>;
+               gpmc,wr-access-ns = <80>;
+               gpmc,rd-cycle-ns = <80>;
+               gpmc,wr-cycle-ns = <80>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,clk-activation-ns = <0>;
index c6c58c1c00e3d866149bc17f12d4cff71fb2b96b..6b675a02066fbe6fb5575d6260a8a891847748d9 100644 (file)
                                status = "disabled";
 
                                lvds-channel@0 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
                                        reg = <0>;
                                        status = "disabled";
 
-                                       port {
+                                       port@0 {
+                                               reg = <0>;
+
                                                lvds0_in: endpoint {
                                                        remote-endpoint = <&ipu_di0_lvds0>;
                                                };
                                };
 
                                lvds-channel@1 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
                                        reg = <1>;
                                        status = "disabled";
 
-                                       port {
+                                       port@1 {
+                                               reg = <1>;
+
                                                lvds1_in: endpoint {
                                                        remote-endpoint = <&ipu_di1_lvds1>;
                                                };
index 598afe91c6763b368f2dd53f807a12e780b1a41e..4773d6af66a0ad8bfa2296a53737e0d82f3698ce 100644 (file)
@@ -40,7 +40,7 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,psc-clock";
                clocks = <&chipclk16>;
-               clock-output-names = "usb";
+               clock-output-names = "usb1";
                reg = <0x02350004 0xb00>, <0x02350000 0x400>;
                reg-names = "control", "domain";
                domain-id = <0>;
@@ -60,8 +60,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,psc-clock";
                clocks = <&chipclk12>;
-               clock-output-names = "pcie";
-               reg = <0x0235006c 0xb00>, <0x02350000 0x400>;
+               clock-output-names = "pcie1";
+               reg = <0x0235006c 0xb00>, <0x02350048 0x400>;
                reg-names = "control", "domain";
                domain-id = <18>;
        };
index 1fe45d1f75ec8d52aa8dd59a1e344a931fc32576..4361777a08d81090cca597271a5f7a6d3b22ea25 100644 (file)
@@ -93,7 +93,7 @@
        };
 
        tv: connector {
-               compatible = "composite-connector";
+               compatible = "composite-video-connector";
                label = "tv";
 
                port {
index b8698ca686471857c28ef4af49593d1837ad6438..429471aa7a1f49fa0f2b557d1192a8e3361a57c6 100644 (file)
                                };
 
                                ldo8_reg: ldo8 {
-                                       /* VDD_3v0: Does not go anywhere */
+                                       /* VDD_3V_GP: act led/serial console */
                                        regulator-name = "ldo8";
                                        regulator-min-microvolt = <3000000>;
                                        regulator-max-microvolt = <3000000>;
+                                       regulator-always-on;
                                        regulator-boot-on;
-                                       /* Unused */
-                                       status = "disabled";
                                };
 
                                ldo9_reg: ldo9 {
index 79ecb4f34ffb34f25037f829a6227ea69b7a9fc8..10e78d00a0bb348a8e77aa8f04b484a6b2195bfd 100644 (file)
@@ -466,6 +466,7 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
  */
 #define v7_exit_coherency_flush(level) \
        asm volatile( \
+       ".arch  armv7-a \n\t" \
        "stmfd  sp!, {fp, ip} \n\t" \
        "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
        "bic    r0, r0, #"__stringify(CR_C)" \n\t" \
index 83259b8733337e05091c82d9705d8e6c224118f4..5f833f7adba1abdc8620a11020cb2c80dec24d26 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
        .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+       struct thread_info *thread;
+
+       thread = current_thread_info();
+
+       thread->tp_value[0] = val;
+
+       /*
+        * This code runs with preemption enabled and therefore must
+        * be reentrant with respect to switch_tls.
+        *
+        * We need to ensure ordering between the shadow state and the
+        * hardware state, so that we don't corrupt the hardware state
+        * with a stale shadow state during context switch.
+        *
+        * If we're preempted here, switch_tls will load TPIDRURO from
+        * thread_info upon resuming execution and the following mcr
+        * is merely redundant.
+        */
+       barrier();
+
+       if (!tls_emu) {
+               if (has_tls_reg) {
+                       asm("mcr p15, 0, %0, c13, c0, 3"
+                           : : "r" (val));
+               } else {
+#ifdef CONFIG_KUSER_HELPERS
+                       /*
+                        * User space must never try to access this
+                        * directly.  Expect your app to break
+                        * eventually if you do so.  The user helper
+                        * at 0xffff0fe0 must be used instead.  (see
+                        * entry-armv.S for details)
+                        */
+                       *((unsigned int *)0xffff0ff0) = val;
+#endif
+               }
+
+       }
+}
+
 static inline unsigned long get_tpuser(void)
 {
        unsigned long reg = 0;
@@ -59,5 +105,23 @@ static inline unsigned long get_tpuser(void)
 
        return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+       /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+        * we need not update thread_info.
+        */
+       if (has_tls_reg && !tls_emu) {
+               asm("mcr p15, 0, %0, c13, c0, 2"
+                   : : "r" (val));
+       }
+}
+
+static inline void flush_tls(void)
+{
+       set_tls(0);
+       set_tpuser(0);
+}
+
 #endif
 #endif /* __ASMARM_TLS_H */
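
set_tls() above writes the per-thread software shadow first, forces the compiler to keep that store ahead of the hardware update with barrier(), and only then programs TPIDRURO; the arm64 hunks later in this commit apply the same ordering. Distilled into a standalone sketch, where tp_shadow and hw_write() are stand-ins rather than kernel symbols:

#include <linux/compiler.h>

static unsigned long tp_shadow;		/* stand-in for the thread_info copy */

static inline void hw_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c13, c0, 3" : : "r" (val));
}

static inline void set_shadowed_tls(unsigned long val)
{
	tp_shadow = val;	/* 1: shadow copy read on context switch  */
	barrier();		/* 2: keep store 1 ahead of store 3       */
	hw_write(val);		/* 3: redundant, never stale, if the task
				 *    was preempted between 1 and 3       */
}
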
index a4cd7af475e90de4bb57398cfd569e06f15a1135..4767eb9caa78c89fe0c6b447b0d234fb71738285 100644 (file)
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1        "lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2 "lr", "cc"
 #endif
 #define __GUP_CLOBBER_4        "lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8        "lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)                             \
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)                                \
-       __get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)                             \
+       __get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)                     \
+          __asm__ __volatile__ (                                       \
+               __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
+               __asmeq("%3", "r1")                                     \
+               "bl     __get_user_64t_" #__s                           \
+               : "=&r" (__e), "=r" (__r2)                              \
+               : "0" (__p), "r" (__l)                                  \
+               : __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)                                                  \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 1);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 1); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 1);   \
                        break;                                          \
                case 2:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 2);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 2); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 2);   \
                        break;                                          \
                case 4:                                                 \
-                       __get_user_x(__r2, __p, __e, __l, 4);           \
+                       if (sizeof((x)) >= 8)                           \
+                               __get_user_x_64t(__r2, __p, __e, __l, 4); \
+                       else                                            \
+                               __get_user_x(__r2, __p, __e, __l, 4);   \
                        break;                                          \
                case 8:                                                 \
                        if (sizeof((x)) < 8)                            \
-                               __get_user_xb(__r2, __p, __e, __l, 4);  \
+                               __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 8);   \
                        break;                                          \
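
The reworked __get_user_check above separates two mismatched-size cases: a 64-bit destination fed from a 1-, 2- or 4-byte user object goes through the new _64t helpers so the value lands in the least-significant register even on big-endian, while a narrower destination reading an 8-byte user object keeps the low word via the renamed _32t helper. A hedged usage sketch for the widening case, with illustrative names; the explicit mask keeps the result independent of whatever ends up in the upper word:

#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_u32_into_u64(u32 __user *uptr, u64 *out)
{
	u64 val;

	if (get_user(val, uptr))	/* 4-byte load, 8-byte destination */
		return -EFAULT;

	*out = (u32)val;		/* keep only the 32 bits actually read */
	return 0;
}
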
index 1109017499e52f05e92ca7056831323f4ed330e9..e8275ea88e8806d79b784deef60b57c6cf76a8ad 100644 (file)
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
-{
-       if (__generic_dma_ops(hwdev)->unmap_page)
-               __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+               struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-               __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_device)
-               __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
index ded062f9b358038c05fa074816706c40619e6408..135c24a5ba262b76529fa1c3dbe088d098de84a5 100644 (file)
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY      (~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-       unsigned long pfn;
-
-       if (phys_to_mach.rb_node != NULL) {
-               pfn = __mfn_to_pfn(mfn);
-               if (pfn != INVALID_P2M_ENTRY)
-                       return pfn;
-       }
-
        return mfn;
 }
 
index f7b450f97e6884bf5d28de653e7d291f809ab1d1..a88671cfe1ffb1e1ee43b06a7c8c6fd704f38580 100644 (file)
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
index 2c425760451340b41a9d5748f5edf0e0bcbcaaf3..5c4d38e32a5128a48ce20cc9d2757465e4e7b935 100644 (file)
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index 08d731294bcdda819fdc7cfe1e12cd4bf525a771..b206d7790c7790af1a4c740c5d1c65c43b7f45c5 100644 (file)
  *
  *     @ TESTCASE_START
  *     bl      __kprobes_test_case_start
- *     @ start of inline data...
+ *     .pushsection .rodata
+ *     "10:
  *     .ascii "mov r0, r7"     @ text title for test case
  *     .byte   0
- *     .align  2, 0
+ *     .popsection
+ *     @ start of inline data...
+ *     .word   10b             @ pointer to title in .rodata section
  *
  *     @ TEST_ARG_REG
  *     .byte   ARG_TYPE_REG
@@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void)
        __asm__ __volatile__ (
                "stmdb  sp!, {r4-r11}                           \n\t"
                "sub    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-               "bic    r0, lr, #1  @ r0 = inline title string  \n\t"
+               "bic    r0, lr, #1  @ r0 = inline data          \n\t"
                "mov    r1, sp                                  \n\t"
                "bl     kprobes_test_case_start                 \n\t"
                "bx     r0                                      \n\t"
@@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc)
        return pc + 4;
 }
 
-static uintptr_t __used kprobes_test_case_start(const char *title, void *stack)
+static uintptr_t __used kprobes_test_case_start(const char **title, void *stack)
 {
        struct test_arg *args;
        struct test_arg_end *end_arg;
        unsigned long test_code;
 
-       args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4);
-
-       current_title = title;
+       current_title = *title++;
+       args = (struct test_arg *)title;
        current_args = args;
        current_stack = stack;
 
index eecc90a0fd912e7a0de0a50c7a707df8864d59a3..4430990e90e7aaf309d67dbad363490aced5a413 100644 (file)
@@ -111,11 +111,14 @@ struct test_arg_end {
 #define TESTCASE_START(title)                                  \
        __asm__ __volatile__ (                                  \
        "bl     __kprobes_test_case_start               \n\t"   \
+       ".pushsection .rodata                           \n\t"   \
+       "10:                                            \n\t"   \
        /* don't use .asciz here as 'title' may be */           \
        /* multiple strings to be concatenated.  */             \
        ".ascii "#title"                                \n\t"   \
        ".byte  0                                       \n\t"   \
-       ".align 2, 0                                    \n\t"
+       ".popsection                                    \n\t"   \
+       ".word  10b                                     \n\t"
 
 #define        TEST_ARG_REG(reg, val)                                  \
        ".byte  "__stringify(ARG_TYPE_REG)"             \n\t"   \
index e6a6edbec6135de27cf0d6426185cec90654f6fc..4bf4cce759fe1cf6d56f25c6d4141124709c7a33 100644 (file)
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-       struct arm_pmu *cpu_pmu = data;
-       struct platform_device *pmu_device = cpu_pmu->plat_device;
-       int irq = platform_get_irq(pmu_device, 0);
+       int irq = *(int *)data;
 
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
-       cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-       struct arm_pmu *cpu_pmu = data;
-       struct platform_device *pmu_device = cpu_pmu->plat_device;
-       int irq = platform_get_irq(pmu_device, 0);
+       int irq = *(int *)data;
 
-       cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
        disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
-               on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+               on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
                free_percpu_irq(irq, &percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                                irq);
                        return err;
                }
-               on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+               on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        err = 0;
index 81ef686a91ca18dccfbb85cd75b5a2aa6c303ecc..a35f6ebbd2c2bddecf8f5b63a04e612ad5d38c75 100644 (file)
@@ -334,6 +334,8 @@ void flush_thread(void)
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));
 
+       flush_tls();
+
        thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
 
index 67ca8578c6d8fd9adf35cfebbe0d75cdda8d4786..587fdfe1a72cdb6c582ee03d455ec391dd456a83 100644 (file)
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
        while (1) {
                unsigned long temp;
 
-               /*
-                * Barrier required between accessing protected resource and
-                * releasing a lock for it. Legacy code might not have done
-                * this, and we cannot determine that this is not the case
-                * being emulated, so insert always.
-                */
-               smp_mb();
-
                if (type == TYPE_SWPB)
                        __user_swpb_asm(*data, address, res, temp);
                else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
        }
 
        if (res == 0) {
-               /*
-                * Barrier also required between acquiring a lock for a
-                * protected resource and accessing the resource. Inserted for
-                * same reason as above.
-                */
-               smp_mb();
-
                if (type == TYPE_SWPB)
                        swpbcounter++;
                else
index 7b8403b7666617f30baca2fec58fb579615e679b..80f0d69205e73f28a72d3917d73162bc23cf3417 100644 (file)
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
 
        switch (cmd) {
        case THREAD_NOTIFY_FLUSH:
-               thread->thumbee_state = 0;
+               teehbr_write(0);
                break;
        case THREAD_NOTIFY_SWITCH:
                current_thread_info()->thumbee_state = teehbr_read();
index c8e4bb7149444ce9c2670c92345ea67ccb94eed8..a964c9f40f875882e3bf4d722eb7290074981709 100644 (file)
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-       struct thread_info *thread = current_thread_info();
        siginfo_t info;
 
        if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
                return regs->ARM_r0;
 
        case NR(set_tls):
-               thread->tp_value[0] = regs->ARM_r0;
-               if (tls_emu)
-                       return 0;
-               if (has_tls_reg) {
-                       asm ("mcr p15, 0, %0, c13, c0, 3"
-                               : : "r" (regs->ARM_r0));
-               } else {
-                       /*
-                        * User space must never try to access this directly.
-                        * Expect your app to break eventually if you do so.
-                        * The user helper at 0xffff0fe0 must be used instead.
-                        * (see entry-armv.S for details)
-                        */
-                       *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-               }
+               set_tls(regs->ARM_r0);
                return 0;
 
 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
index 938600098b88690507ef22489e3a3d0dbc3cc401..8ecfd15c3a0248db29667fe3dc6ec6429fc9fc7c 100644 (file)
@@ -80,7 +80,7 @@ ENTRY(__get_user_8)
 ENDPROC(__get_user_8)
 
 #ifdef __ARMEB__
-ENTRY(__get_user_lo8)
+ENTRY(__get_user_32t_8)
        check_uaccess r0, 8, r1, r2, __get_user_bad
 #ifdef CONFIG_CPU_USE_DOMAINS
        add     r0, r0, #4
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8)
 #endif
        mov     r0, #0
        ret     lr
-ENDPROC(__get_user_lo8)
+ENDPROC(__get_user_32t_8)
+
+ENTRY(__get_user_64t_1)
+       check_uaccess r0, 1, r1, r2, __get_user_bad8
+8: TUSER(ldrb) r3, [r0]
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_1)
+
+ENTRY(__get_user_64t_2)
+       check_uaccess r0, 2, r1, r2, __get_user_bad8
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb     .req    ip
+9:     ldrbt   r3, [r0], #1
+10:    ldrbt   rb, [r0], #0
+#else
+rb     .req    r0
+9:     ldrb    r3, [r0]
+10:    ldrb    rb, [r0, #1]
+#endif
+       orr     r3, rb, r3, lsl #8
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_2)
+
+ENTRY(__get_user_64t_4)
+       check_uaccess r0, 4, r1, r2, __get_user_bad8
+11: TUSER(ldr) r3, [r0]
+       mov     r0, #0
+       ret     lr
+ENDPROC(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8)
        .long   6b, __get_user_bad8
 #ifdef __ARMEB__
        .long   7b, __get_user_bad
+       .long   8b, __get_user_bad8
+       .long   9b, __get_user_bad8
+       .long   10b, __get_user_bad8
+       .long   11b, __get_user_bad8
 #endif
 .popsection
index 84acdfd1d715bfd796066832971fbe0ad51c757a..5a75cdc81891c369d3cfd738d9fb3271e7ca208b 100644 (file)
@@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
        struct clk_gate2 *gate = to_clk_gate2(hw);
 
        if (gate->share_count)
-               return !!(*gate->share_count);
+               return !!__clk_get_enable_count(hw->clk);
        else
                return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
 }
@@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
        gate->bit_idx = bit_idx;
        gate->flags = clk_gate2_flags;
        gate->lock = lock;
-
-       /* Initialize share_count per hardware state */
-       if (share_count)
-               *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
        gate->share_count = share_count;
 
        init.name = name;
index e7189dcc9309431291205b92c5328aed1ea71ffd..08d4167cc7c55751ddb71516b0a125e00afd9279 100644 (file)
@@ -1,9 +1,6 @@
 menu "TI OMAP/AM/DM/DRA Family"
        depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
 
-config ARCH_OMAP
-       bool
-
 config ARCH_OMAP2
        bool "TI OMAP2"
        depends on ARCH_MULTI_V6
index 8fd87a3055bf6c4a084a25a51f19e4263567add7..9e91a4e7519a63709ea38056373491f2665c8327 100644 (file)
@@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void)
 
        spin_lock_irqsave(&io_chain_lock, flags);
 
-       if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl())
+       if (cpu_is_omap34xx())
                omap3xxx_prm_reconfigure_io_chain();
        else if (cpu_is_omap44xx())
                omap44xx_prm_reconfigure_io_chain();
index 2458be6fc67bd7ef0617aea4899164d1507077c0..372de3edf4a582f24957beceb5764e6b1247b527 100644 (file)
@@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
        .ocp_barrier            = &omap3xxx_prm_ocp_barrier,
        .save_and_clear_irqen   = &omap3xxx_prm_save_and_clear_irqen,
        .restore_irqen          = &omap3xxx_prm_restore_irqen,
-       .reconfigure_io_chain   = &omap3xxx_prm_reconfigure_io_chain,
+       .reconfigure_io_chain   = NULL,
 };
 
 /*
@@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 }
 
 /**
- * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain
+ *
+ * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only
+ * thing we can do is toggle EN_IO bit for earlier omaps.
+ */
+void omap3430_pre_es3_1_reconfigure_io_chain(void)
+{
+       omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+                                    PM_WKEN);
+       omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+                                  PM_WKEN);
+       omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+}
+
+/**
+ * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
  *
  * Clear any previously-latched I/O wakeup events and ensure that the
  * I/O wakeup gates are aligned with the current mux settings.  Works
  * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
  * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit.  No
- * return value.
+ * return value. These registers are only available in 3430 es3.1 and later.
  */
-void omap3xxx_prm_reconfigure_io_chain(void)
+void omap3_prm_reconfigure_io_chain(void)
 {
        int i = 0;
 
@@ -399,6 +414,15 @@ void omap3xxx_prm_reconfigure_io_chain(void)
        omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST);
 }
 
+/**
+ * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain
+ */
+void omap3xxx_prm_reconfigure_io_chain(void)
+{
+       if (omap3_prcm_irq_setup.reconfigure_io_chain)
+               omap3_prcm_irq_setup.reconfigure_io_chain();
+}
+
 /**
  * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
  *
@@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void)
        if (!(prm_features & PRM_HAS_IO_WAKEUP))
                return 0;
 
+       if (omap3_has_io_chain_ctrl())
+               omap3_prcm_irq_setup.reconfigure_io_chain =
+                       omap3_prm_reconfigure_io_chain;
+       else
+               omap3_prcm_irq_setup.reconfigure_io_chain =
+                       omap3430_pre_es3_1_reconfigure_io_chain;
+
        omap3xxx_prm_enable_io_wakeup();
        ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
        if (!ret)
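
Taken together, the hunks above move the 3430 revision check out of the hot path: the late-init code decides once whether the ES3.1+ ST_IO_CHAIN sequence or the EN_IO toggle applies, and the wrapper quietly does nothing until that decision has been made. The shape of the pattern as a short sketch (names illustrative, not the file's exact code; the omap3_* symbols are the ones introduced above):

#include <linux/init.h>

static void (*io_chain_fn)(void);	/* NULL until the capability is known */

void reconfigure_io_chain(void)
{
	if (io_chain_fn)		/* safe no-op before late init */
		io_chain_fn();
}

static int __init pick_io_chain_handler(void)
{
	io_chain_fn = omap3_has_io_chain_ctrl() ?
			omap3_prm_reconfigure_io_chain :
			omap3430_pre_es3_1_reconfigure_io_chain;
	return 0;
}
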
index 630fa916bbc60601ffc94e9ca94616dbb34debe4..04b013fbc98f46a02ae227fe86c631fa26985caa 100644 (file)
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate);
 /*
  * For non device-tree builds, keep legacy timer init
  */
-void pxa_timer_init(void)
+void __init pxa_timer_init(void)
 {
        pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
                            get_clock_tick_rate());
index 0c1ab49e5f7b7aca6d35d7be575061b8d248a7c9..83792f4324ead79f82587b25226ecc65732293fc 100644 (file)
@@ -41,6 +41,7 @@
  * This code is not portable to processors with late data abort handling.
  */
 #define CODING_BITS(i) (i & 0x0e000000)
+#define COND_BITS(i)   (i & 0xf0000000)
 
 #define LDST_I_BIT(i)  (i & (1 << 26))         /* Immediate constant   */
 #define LDST_P_BIT(i)  (i & (1 << 24))         /* Preindex             */
@@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                break;
 
        case 0x04000000:        /* ldr or str immediate */
+               if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
+                       goto bad;
                offset.un = OFFSET_BITS(instr);
                handler = do_alignment_ldrstr;
                break;
index 1a24e9232ec8653b70dc163dd524cf1b6f85b951..d3daed0ae0ad92ff9aefe4ad1e6e151c9e2a855a 100644 (file)
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
        mov     \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
        mov     \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT             @ lower bits
        addls   \ttbr1, \ttbr1, #TTBR1_OFFSET
-       adcls   \tmp, \tmp, #0
        mcrr    p15, 1, \ttbr1, \tmp, c2                        @ load TTBR1
        mov     \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
        mov     \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT             @ lower bits
@@ -158,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext)
         *  TFR   EV X F   IHD LR    S
         * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
         * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-        *   11    0 110    1  0011 1100 .111 1101 < we want
+        *   11    0 110    0  0011 1100 .111 1101 < we want
         */
        .align  2
        .type   v7_crval, #object
 v7_crval:
-       crval   clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
+       crval   clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c
index 02fc10d2d63bc28e064d918ea04af3115568600e..d055db32ffcb8b5b0b9e1fef70d58539686314c9 100644 (file)
@@ -1,3 +1,6 @@
+config ARCH_OMAP
+       bool
+
 if ARCH_OMAP
 
 menu "TI OMAP Common Features"
index 12969523414cf2d0b972bdfb8b3ff50b9c781c8f..1f85bfe6b4704d97e999b748f46a61a362f20d66 100644 (file)
@@ -1 +1 @@
-obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
index 98544c5f86e99710a7845e7a55648cdf73c14599..0e15f011f9c86895db17d57e0e1e999164d0312c 100644 (file)
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
        xen_domain_type = XEN_HVM_DOMAIN;
 
        xen_setup_features();
+
+       if (!xen_feature(XENFEAT_grant_map_identity)) {
+               pr_warn("Please upgrade your Xen.\n"
+                               "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+       }
+
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644 (file)
index 0000000..3b99860
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+       struct page *page;
+       unsigned long virt;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+               return 0;
+
+       page = alloc_page(GFP_KERNEL);
+       if (page == NULL) {
+               pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+               return -ENOMEM;
+       }
+
+       virt = (unsigned long)__va(page_to_phys(page));
+       pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+       ptep = pte_offset_kernel(pmdp, virt);
+
+       per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+       per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+       return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+       switch (action) {
+       case CPU_UP_PREPARE:
+               if (alloc_xen_mm32_scratch_page(cpu))
+                       return NOTIFY_BAD;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+       .notifier_call  = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+       unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+       pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+       *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+       local_flush_tlb_kernel_page(virt);
+
+       return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+       put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+       size_t size, enum dma_data_direction dir,
+       void (*op)(const void *, size_t, int))
+{
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
+       do {
+               size_t len = left;
+               void *vaddr;
+       
+               if (!pfn_valid(pfn))
+               {
+                       /* Cannot map the page, we don't know its physical address.
+                        * Return and hope for the best */
+                       if (!xen_feature(XENFEAT_grant_map_identity))
+                               return;
+                       vaddr = xen_mm32_remap_page(handle) + offset;
+                       op(vaddr, len, dir);
+                       xen_mm32_unmap(vaddr - offset);
+               } else {
+                       struct page *page = pfn_to_page(pfn);
+
+                       if (PageHighMem(page)) {
+                               if (len + offset > PAGE_SIZE)
+                                       len = PAGE_SIZE - offset;
+
+                               if (cache_is_vipt_nonaliasing()) {
+                                       vaddr = kmap_atomic(page);
+                                       op(vaddr + offset, len, dir);
+                                       kunmap_atomic(vaddr);
+                               } else {
+                                       vaddr = kmap_high_get(page);
+                                       if (vaddr) {
+                                               op(vaddr + offset, len, dir);
+                                               kunmap_high(page);
+                                       }
+                               }
+                       } else {
+                               vaddr = page_address(page) + offset;
+                               op(vaddr, len, dir);
+                       }
+               }
+
+               offset = 0;
+               pfn++;
+               left -= len;
+       } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+       /* Cannot use __dma_page_dev_to_cpu because we don't have a
+        * struct page for handle */
+
+       if (dir != DMA_TO_DEVICE)
+               outer_inv_range(handle, handle + size);
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(handle, handle + size);
+       } else {
+               outer_clean_range(handle, handle + size);
+       }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+
+{
+       if (!__generic_dma_ops(hwdev)->unmap_page)
+               return;
+       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               return;
+
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+               return;
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+               return;
+       __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+       int cpu;
+
+       if (!xen_initial_domain())
+               return 0;
+
+       register_cpu_notifier(&xen_mm32_cpu_notifier);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (alloc_xen_mm32_scratch_page(cpu)) {
+                       put_online_cpus();
+                       unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+                       return -ENOMEM;
+               }
+       }
+       put_online_cpus();
+
+       return 0;
+}
+arch_initcall(xen_mm32_init);
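
The new mm32.c performs cache maintenance on foreign pages that have no struct page by temporarily mapping them at a per-CPU scratch virtual address; the mapping is protected only by the implicit preempt-disable of get_cpu_var(). A minimal sketch of that per-CPU discipline (not the file's exact code; install_pte_for() is hypothetical):

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned long, scratch_virt);

static void *grab_scratch_mapping(phys_addr_t phys)
{
	/* get_cpu_var() disables preemption, so no other task on this
	 * CPU can reuse the scratch slot until put_cpu_var() runs. */
	unsigned long virt = get_cpu_var(scratch_virt);

	install_pte_for(virt, phys);	/* hypothetical: set pte, flush TLB */
	return (void *)virt;
}

static void release_scratch_mapping(void)
{
	put_cpu_var(scratch_virt);	/* re-enables preemption */
}
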
index 97baf44278171680a0932f949c22e8e26c54926f..05485777625474e835fc41a5d57fb494ca778552 100644 (file)
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
        unsigned long pfn;
        unsigned long mfn;
        unsigned long nr_pages;
-       struct rb_node rbnode_mach;
        struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-               if (new->mfn == entry->mfn)
-                       goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-       struct rb_node **link = &mach_to_phys.rb_node;
-       struct rb_node *parent = NULL;
-       struct xen_p2m_entry *entry;
-       int rc = 0;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-               if (new->mfn == entry->mfn)
-                       goto err_out;
-               if (new->pfn == entry->pfn)
-                       goto err_out;
-
-               if (new->mfn < entry->mfn)
-                       link = &(*link)->rb_left;
-               else
-                       link = &(*link)->rb_right;
-       }
-       rb_link_node(&new->rbnode_mach, parent, link);
-       rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-       goto out;
-
-err_out:
-       rc = -EINVAL;
-       pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-                       __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-       return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-       struct rb_node *n = mach_to_phys.rb_node;
-       struct xen_p2m_entry *entry;
-       unsigned long irqflags;
-
-       read_lock_irqsave(&p2m_lock, irqflags);
-       while (n) {
-               entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-               if (entry->mfn <= mfn &&
-                               entry->mfn + entry->nr_pages > mfn) {
-                       read_unlock_irqrestore(&p2m_lock, irqflags);
-                       return entry->pfn + (mfn - entry->mfn);
-               }
-               if (mfn < entry->mfn)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       read_unlock_irqrestore(&p2m_lock, irqflags);
-
-       return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-                               rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                                write_unlock_irqrestore(&p2m_lock, irqflags);
                                kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
        p2m_entry->mfn = mfn;
 
        write_lock_irqsave(&p2m_lock, irqflags);
-       if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-               (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+       if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
                write_unlock_irqrestore(&p2m_lock, irqflags);
                return false;
        }
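
With the mach_to_phys tree gone, only the pfn-keyed phys_to_mach tree remains. The body of __pfn_to_mfn() falls outside this hunk, but its walk mirrors the deleted __mfn_to_pfn() shown above with pfn as the key; roughly as follows (a sketch reusing this file's struct fields, not its exact code):

#include <linux/rbtree.h>

static struct xen_p2m_entry *p2m_lookup_pfn(struct rb_root *root,
					    unsigned long pfn)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct xen_p2m_entry *e =
			rb_entry(n, struct xen_p2m_entry, rbnode_phys);

		if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
			return e;	/* pfn falls inside this range */

		n = pfn < e->pfn ? n->rb_left : n->rb_right;
	}
	return NULL;
}
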
index 0f08dfd69ebc73ea99b7b7f6d68c4b2f320eb2d1..dfa6e3e74fddec289649c1baba921378f521f611 100644 (file)
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+               affinity = cpu_online_mask;
                ret = true;
+       }
 
-       /*
-        * when using forced irq_set_affinity we must ensure that the cpu
-        * being offlined is not present in the affinity mask, it may be
-        * selected as the target CPU otherwise
-        */
-       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index 1309d64aa9268f755504c37ada95c4bb44010cc6..29d48690f2ac59ef56d5384d1fde5db92f0624e7 100644 (file)
@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+       asm ("msr tpidr_el0, xzr");
+
+       if (is_compat_task()) {
+               current->thread.tp_value = 0;
+
+               /*
+                * We need to ensure ordering between the shadow state and the
+                * hardware state, so that we don't corrupt the hardware state
+                * with a stale shadow state during context switch.
+                */
+               barrier();
+               asm ("msr tpidrro_el0, xzr");
+       }
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
+       tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
 }
 
index de2b0226e06df34fd7aaa4bb55e014fc33f9cddf..dc47e53e9e28c15da99e62976a9ca29f71da8bc4 100644 (file)
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
        case __ARM_NR_compat_set_tls:
                current->thread.tp_value = regs->regs[0];
+
+               /*
+                * Protect against register corruption from context switch.
+                * See comment in tls_thread_flush.
+                */
+               barrier();
                asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
                return 0;
 
index 5472c24018766ea5348d2d0cb56f9a932f46356a..a83061f37e431be889442a954864faa9cbfcebb6 100644 (file)
@@ -149,8 +149,7 @@ void __init arm64_memblock_init(void)
                memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-       if (!efi_enabled(EFI_MEMMAP))
-               early_init_fdt_scan_reserved_mem();
+       early_init_fdt_scan_reserved_mem();
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
index 4c4ac163c6008b1e3d7f15a3501e5ead1bf89e0a..b6bda183862902cb3476d72683fb0997287d086b 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -6,6 +5,8 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
@@ -51,9 +52,6 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -85,7 +83,6 @@ CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -95,17 +92,13 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
index e8ed3ae70aae1f83be6b17a7b2687bb532dd9c0b..81f686dee53c0107b276a3424bf76e787962effb 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -6,13 +5,13 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=20
 CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
 CONFIG_IA64_CYCLONE=y
@@ -29,14 +28,13 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -82,16 +80,13 @@ CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
 CONFIG_IGB=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -151,6 +146,7 @@ CONFIG_USB_STORAGE=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INTEL_IOMMU=y
 CONFIG_MSPEC=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -164,7 +160,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -175,16 +170,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -225,11 +214,7 @@ CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
-CONFIG_INTEL_IOMMU=y
index d663efd1e4db6ebd31d56f931d266686da8123e3..5b4fcdd514576b0eccc7b5c058fbc5ac67118bfc 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -9,6 +8,8 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_CYCLONE=y
 CONFIG_SMP=y
@@ -24,14 +25,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -71,15 +70,12 @@ CONFIG_FUSION_SPI=y
 CONFIG_FUSION_FC=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -146,7 +142,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -157,16 +152,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
index b4548a3e82d59b03ccfb6f01c9dc21d9dbf89c5d..f0f69fdbddaeb7b2de127819cc462e7ac4e24ae8 100644 (file)
@@ -1,13 +1,12 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_SIM=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=y
@@ -49,8 +47,6 @@ CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
index c8a3f40e77f636da639f53997f1c6ea47535d284..192ed157c9ceac72fc3c3d66511654ac8a1cf660 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -11,6 +10,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -29,14 +30,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -53,6 +52,7 @@ CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_MD=y
@@ -72,15 +72,12 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -118,7 +115,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -129,16 +125,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -180,6 +170,5 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
index 54bc72eda30d4f5efefa3c3f9949fcba3155adc0..b504c8e2fd52b230bf1ca89efcb4d2677ad57b4c 100644 (file)
@@ -1,9 +1,9 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_ZX1=y
 CONFIG_MCKINLEY=y
 CONFIG_SMP=y
@@ -18,6 +18,7 @@ CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -37,9 +38,9 @@ CONFIG_CHR_DEV_OSST=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_FUSION=y
@@ -48,18 +49,15 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=y
 CONFIG_TULIP_MWI=y
 CONFIG_TULIP_MMIO=y
 CONFIG_TULIP_NAPI=y
 CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=y
 CONFIG_E1000=y
-CONFIG_TIGON3=y
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,7 +98,6 @@ CONFIG_USB_STORAGE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_UDF_FS=y
@@ -110,12 +107,9 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=y
 CONFIG_NLS_CODEPAGE_775=y
index 6a65bb7d06571050157bda55a30f9fe366387cbf..18026b2eb5823c86ab0455088e9b3b1b0c409393 100644 (file)
 #define __NR_sched_getattr             1337
 #define __NR_renameat2                 1338
 #define __NR_getrandom                 1339
-#define __NR_memfd_create              1339
+#define __NR_memfd_create              1340
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index ec73b2cf912a45d2ce350c26bd19e0de54c09e75..fc505d58f078ecfb55210cd3e802ed23c4b41a8e 100644 (file)
@@ -38,27 +38,6 @@ static void pci_fixup_video(struct pci_dev *pdev)
                return;
        /* Maybe, this machine supports legacy memory map. */
 
-       if (!vga_default_device()) {
-               resource_size_t start, end;
-               int i;
-
-               /* Does firmware framebuffer belong to us? */
-               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
-                               continue;
-
-                       start = pci_resource_start(pdev, i);
-                       end  = pci_resource_end(pdev, i);
-
-                       if (!start || !end)
-                               continue;
-
-                       if (screen_info.lfb_base >= start &&
-                           (screen_info.lfb_base + screen_info.lfb_size) < end)
-                               vga_set_default_device(pdev);
-               }
-       }
-
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
@@ -83,8 +62,7 @@ static void pci_fixup_video(struct pci_dev *pdev)
                pci_read_config_word(pdev, PCI_COMMAND, &config);
                if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
                        pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
-                       vga_set_default_device(pdev);
+                       dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n");
                }
        }
 }
index 40e1c1dd0e24a47d38f142ce0c5543d629a0fd50..6feded3b0c4c1eb6d1414082a9f9318041129a39 100644 (file)
@@ -127,7 +127,7 @@ config SECCOMP
 
 endmenu
 
-menu "Advanced setup"
+menu "Kernel features"
 
 config ADVANCED_OPTIONS
        bool "Prompt for advanced kernel configuration options"
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES
 
 endchoice
 
-endmenu
-
 source "mm/Kconfig"
 
+endmenu
+
 menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
index b4a4cb150aa998af14d3d359db7c2b73f26397db..596e485ae7074df3b8cf5abba5d390153c1166c7 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/percpu.h>
 #include <asm/ptrace.h>
+#include <linux/linkage.h>
 
 /*
  * These are per-cpu variables required in entry.S, among other
index 0aa005703a0b5f348848afb5936f650d18f28322..59a89a64a8656b01436f4aaf38ec270f5d8b2a7a 100644 (file)
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr,
 
        if ((get_fs().seg < ((unsigned long)addr)) ||
                        (get_fs().seg < ((unsigned long)addr + size - 1))) {
-               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+               pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
                return 0;
        }
 ok:
-       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+       pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
        return 1;
index fd56a8f66489dc68264ac8f70cc70863c6f25380..ea4b233647c1aa32d23562e7abba7dab72d404db 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         381
+#define __NR_syscalls         387
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 900c7e5333b650666c374a8e378129b1c7eb43f1..574c430006997427b564ef9bdeda2c16c3497279 100644 (file)
@@ -546,6 +546,7 @@ config SGI_IP28
        # select SYS_HAS_EARLY_PRINTK
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
+       select MIPS_L1_CACHE_SHIFT_7
       help
         This is the SGI Indigo2 with R10000 processor.  To compile a Linux
         kernel that runs on these, say Y here.
@@ -2029,7 +2030,9 @@ config MIPS_CMP
        bool "MIPS CMP framework support (DEPRECATED)"
        depends on SYS_SUPPORTS_MIPS_CMP
        select MIPS_GIC_IPI
+       select SMP
        select SYNC_R4K
+       select SYS_SUPPORTS_SMP
        select WEAK_ORDERING
        default n
        help
index 9336509f47ad61b129598d3f1ed6ee4a99aabefa..bbac51e11179af67698250417512a92136a11a0f 100644 (file)
@@ -113,7 +113,16 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips)
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips) -Wa,--no-warn
 cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
index 37eb2d1fa69a7f293bfb24ea8f759cfafb56e851..b94bf44d8d8e297f85c9cdce01f9836d457f3bb6 100644 (file)
@@ -434,7 +434,7 @@ static void bcm63xx_init_irq(void)
                irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
                irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
                irq_stat_addr[1] = 0;
-               irq_stat_addr[1] = 0;
+               irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
@@ -443,7 +443,7 @@ static void bcm63xx_init_irq(void)
                irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
-               irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1);
+               irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
index b49c7adbfa895140f1194acb35d294a2a8647511..31903cf9709d6d5b755a22ef0e478ea7fc3585de 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/string.h>
 
 #include <asm/addrspace.h>
 
index 8f219dac95988c29bc0367df12d7b97e59f519b9..e24feb0633aa38e69b1cef9cb32716fe87cf02b0 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index cc07560213980017291aab163d3781bfe5e29147..48e16d98b2ccac50279514b6a211571de81b3d84 100644 (file)
@@ -28,6 +28,7 @@ CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 2575302aa2be7aaee07c114b3d31de546ba62cc3..4f37a598545995db32c26b0ad025ed10ac328eb1 100644 (file)
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_NET_KEY=m
index 4cb787ff273e39023832b7e8ea14cb1d58288c23..1c6191ebd583cef098f9af252c6cece585e5fd51 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index e18741ea17717d8c6106256c3c614a941a0292e0..f57b96dcf7df57894e4fcc6d582a2f8036670d33 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index cf0e01f814e1c80d7110f7db05a6b487548f558e..d41742dd26c8131279bc6a898461a043a78335ec 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index edd9ec9cb6781940f03e7653e1b556d40ca11818..a7806e83ea0f1502cf0f78ceb5b0f29a86a07ee4 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index d269a5326a304af098fad6166b8bb1936e5db525..9b6926d6bb3294cfff7f87b832f683249089c065 100644 (file)
@@ -27,6 +27,7 @@ CONFIG_PD6729=m
 CONFIG_I82092=m
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 2f660e9a0da68c7dadf318cb77b628cd601dc774..70509a48df826f3e43545b937737b295095e28e5 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM_RUNTIME=y
 CONFIG_PM_DEBUG=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index c6f84655c98a847909ca379784fb09dc6dbd2d80..82207e8079f32696b9b179cf1bfc34242e4035ee 100644 (file)
@@ -43,6 +43,7 @@ CONFIG_PCI_DEBUG=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM_RUNTIME=y
 CONFIG_PM_DEBUG=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 29d79ae8a823346db977af2c0ece07297e5c1452..db029f4ff759159cf6abc278837e1c04aef31d9e 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y
 CONFIG_PCI=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_NET_KEY=m
index d0352983b94d6f21f859bedc771a3dbcf5f1a757..51f80bd36fcc457a46782bc4266241d4de34ee2c 100644 (file)
@@ -16,8 +16,8 @@
 extern void octeon_cop2_save(struct octeon_cop2_state *);
 extern void octeon_cop2_restore(struct octeon_cop2_state *);
 
-#define cop2_save(r)           octeon_cop2_save(r)
-#define cop2_restore(r)                octeon_cop2_restore(r)
+#define cop2_save(r)           octeon_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r)                octeon_cop2_restore(&(r)->thread.cp2)
 
 #define cop2_present           1
 #define cop2_lazy_restore      1
@@ -26,26 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *);
 
 extern void nlm_cop2_save(struct nlm_cop2_state *);
 extern void nlm_cop2_restore(struct nlm_cop2_state *);
-#define cop2_save(r)           nlm_cop2_save(r)
-#define cop2_restore(r)                nlm_cop2_restore(r)
+
+#define cop2_save(r)           nlm_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r)                nlm_cop2_restore(&(r)->thread.cp2)
 
 #define cop2_present           1
 #define cop2_lazy_restore      0
 
 #elif defined(CONFIG_CPU_LOONGSON3)
 
-#define cop2_save(r)
-#define cop2_restore(r)
-
 #define cop2_present           1
 #define cop2_lazy_restore      1
+#define cop2_save(r)           do { (r); } while (0)
+#define cop2_restore(r)                do { (r); } while (0)
 
 #else
 
 #define cop2_present           0
 #define cop2_lazy_restore      0
-#define cop2_save(r)
-#define cop2_restore(r)
+#define cop2_save(r)           do { (r); } while (0)
+#define cop2_restore(r)                do { (r); } while (0)
 #endif
 
 enum cu2_ops {
index 5d6a76434d00b12a7bb4e2078e03b86c2e8f8e49..c4a912733b65d8d113e344009232021d7dd6baac 100644 (file)
 #ifndef _ASM_MACH_IP28_SPACES_H
 #define _ASM_MACH_IP28_SPACES_H
 
-#define CAC_BASE       _AC(0xa800000000000000, UL)
-
-#define HIGHMEM_START  (~0UL)
-
 #define PHYS_OFFSET    _AC(0x20000000, UL)
 
-#define UNCAC_BASE     _AC(0xc0000000, UL)     /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE                UNCAC_BASE
-
 #include <asm/mach-generic/spaces.h>
 
 #endif /* _ASM_MACH_IP28_SPACES_H */
index 5699ec3a71af3b42c6afb98b33a87ceceb58c175..3be81803595d5b6b41044804124d0ab93745f5dd 100644 (file)
@@ -37,7 +37,7 @@
 
 /*
  * This is used for calculating the real page sizes
- * for FTLB or VTLB + FTLB confugrations.
+ * for FTLB or VTLB + FTLB configurations.
  */
 static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
 {
@@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn)
 
 #endif
 
-#define virt_to_page(kaddr)    pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
+#define virt_to_page(kaddr)    pfn_to_page(PFN_DOWN(virt_to_phys((void *)     \
+                                                                 (kaddr))))
 
 extern int __virt_addr_valid(const volatile void *kaddr);
 #define virt_addr_valid(kaddr)                                         \
index 1e0f20a9cddaa0caac16e0e5adfe09537cdce1dc..eacf865d21c2fc533a36d6b2138253dacba667a9 100644 (file)
@@ -37,11 +37,6 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID     (-1)
 
-#define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
-#define topology_core_id(cpu)                  (cpu_data[cpu].core)
-#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
-
 #define SMP_RESCHEDULE_YOURSELF 0x1    /* XXX braindead */
 #define SMP_CALL_FUNCTION      0x2
 /* Octeon - Tell another core to flush its icache */
index 495c1041a2cc24e3ca1207ac31e7e3ff41f45382..b928b6f898cd5266efe89465d2dc87089f8f357c 100644 (file)
@@ -92,7 +92,7 @@ do {                                                                  \
                        KSTK_STATUS(prev) &= ~ST0_CU2;                  \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_save(&prev->thread.cp2);                           \
+               cop2_save(prev);                                        \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        __clear_software_ll_bit();                                      \
@@ -111,7 +111,7 @@ do {                                                                        \
                        (KSTK_STATUS(current) & ST0_CU2)) {             \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_restore(&current->thread.cp2);                     \
+               cop2_restore(current);                                  \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        if (cpu_has_dsp)                                                \
index 20ea4859c82259534425ceb3833aa098f7400cf2..3e307ec2afbae2f7308df902f450006bafd006f9 100644 (file)
@@ -9,5 +9,13 @@
 #define __ASM_TOPOLOGY_H
 
 #include <topology.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
+#define topology_core_id(cpu)                  (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
+#endif
 
 #endif /* __ASM_TOPOLOGY_H */
index 9bc13eaf9d6776e8e9e2dd2dbde8401784dfb45c..fdb4923777d1347822517b9671b80f378c74d16b 100644 (file)
 #define __NR_sched_getattr             (__NR_Linux + 350)
 #define __NR_renameat2                 (__NR_Linux + 351)
 #define __NR_seccomp                   (__NR_Linux + 352)
+#define __NR_getrandom                 (__NR_Linux + 353)
+#define __NR_memfd_create              (__NR_Linux + 354)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            352
+#define __NR_Linux_syscalls            354
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                352
+#define __NR_O32_Linux_syscalls                354
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_sched_getattr             (__NR_Linux + 310)
 #define __NR_renameat2                 (__NR_Linux + 311)
 #define __NR_seccomp                   (__NR_Linux + 312)
+#define __NR_getrandom                 (__NR_Linux + 313)
+#define __NR_memfd_create              (__NR_Linux + 314)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            312
+#define __NR_Linux_syscalls            314
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         312
+#define __NR_64_Linux_syscalls         314
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_sched_getattr             (__NR_Linux + 314)
 #define __NR_renameat2                 (__NR_Linux + 315)
 #define __NR_seccomp                   (__NR_Linux + 316)
+#define __NR_getrandom                 (__NR_Linux + 317)
+#define __NR_memfd_create              (__NR_Linux + 318)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            316
+#define __NR_Linux_syscalls            318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                316
+#define __NR_N32_Linux_syscalls                318
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 992e18474da5da89f1ff4b878dbbc8e8db47e919..50980bf3983ef3654d4f24c1dbf95e4576d0c3a8 100644 (file)
@@ -71,8 +71,12 @@ machine_kexec(struct kimage *image)
        kexec_start_address =
                (unsigned long) phys_to_virt(image->start);
 
-       kexec_indirection_page =
-               (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+       if (image->type == KEXEC_TYPE_DEFAULT) {
+               kexec_indirection_page =
+                       (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+       } else {
+               kexec_indirection_page = (unsigned long)&image->head;
+       }
 
        memcpy((void*)reboot_code_buffer, relocate_new_kernel,
               relocate_new_kernel_size);
index 5d25462de8a6100c6a1f9f421116632246e54f80..2f7c734771f4e0ad283bb6bf5ee11a0f4b57d707 100644 (file)
@@ -129,7 +129,11 @@ NESTED(_mcount, PT_SIZE, ra)
         nop
 #endif
        b       ftrace_stub
+#ifdef CONFIG_32BIT
+        addiu sp, sp, 8
+#else
         nop
+#endif
 
 static_trace:
        MCOUNT_SAVE_REGS
@@ -139,6 +143,9 @@ static_trace:
         move   a1, AT          /* arg2: parent's return address */
 
        MCOUNT_RESTORE_REGS
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
@@ -183,6 +190,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
        jal     prepare_ftrace_return
         nop
        MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
+#endif
        RETURN_BACK
        END(ftrace_graph_caller)
 
index f93b4cbec739efedeb3cc2f7347aa3c573182c81..744cd10ba599e260b97e0a53da6d872792f7bd54 100644 (file)
@@ -577,3 +577,5 @@ EXPORT(sys_call_table)
        PTR     sys_sched_getattr               /* 4350 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
index 03ebd9979ad29553139a009b5fe27e1685c56958..002b1bc09c387c44dcd5ac4157a97ef8223344cb 100644 (file)
@@ -432,4 +432,6 @@ EXPORT(sys_call_table)
        PTR     sys_sched_getattr               /* 5310 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sys_call_table,.-sys_call_table
index ebc9228e2e150b5d2f853781066aaddcf500d41b..ca6cbbe9805bf8cdc28657468e93390c1ff117eb 100644 (file)
@@ -425,4 +425,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_sched_getattr
        PTR     sys_renameat2                   /* 6315 */
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sysn32_call_table,.-sysn32_call_table
index 25bb8400156da3905d1ba2d586da26a4672b4379..9e10d11fbb84799840f48ca08143bf44aeddc644 100644 (file)
@@ -562,4 +562,6 @@ EXPORT(sys32_call_table)
        PTR     sys_sched_getattr               /* 4350 */
        PTR     sys_renameat2
        PTR     sys_seccomp
+       PTR     sys_getrandom
+       PTR     sys_memfd_create
        .size   sys32_call_table,.-sys32_call_table
index bf0fc6b16ad9487648c0e38f945db0fd7f5461d6..7a4727795a707764fda6014bf5db92d06e63052d 100644 (file)
@@ -650,9 +650,9 @@ static inline int cop1_64bit(struct pt_regs *xcp)
 #define SIFROMREG(si, x)                                               \
 do {                                                                   \
        if (cop1_64bit(xcp))                                            \
-               (si) = get_fpr32(&ctx->fpr[x], 0);                      \
+               (si) = (int)get_fpr32(&ctx->fpr[x], 0);                 \
        else                                                            \
-               (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);         \
+               (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);    \
 } while (0)
 
 #define SITOREG(si, x)                                                 \
@@ -667,7 +667,7 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-#define SIFROMHREG(si, x)      ((si) = get_fpr32(&ctx->fpr[x], 1))
+#define SIFROMHREG(si, x)      ((si) = (int)get_fpr32(&ctx->fpr[x], 1))
 
 #define SITOHREG(si, x)                                                        \
 do {                                                                   \
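
The (int) casts added to SIFROMREG/SIFROMHREG force sign extension: get_fpr32() hands back a 32-bit quantity, and assigning it to a wider signed destination only sign-extends if it is first treated as a signed 32-bit value. A minimal standalone illustration of that C behaviour (widths assumed for the example; this is not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fpr = 0xfffffffeu;		/* e.g. -2 held as a 32-bit register image */
	int64_t without_cast = fpr;		/* zero-extends: 4294967294 */
	int64_t with_cast = (int)fpr;		/* sign-extends: -2 */

	printf("%lld %lld\n", (long long)without_cast, (long long)with_cast);
	return 0;
}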
index 571aab064936d0eff9adefd0046882608374a883..f42e35e42790fdcd7bc2a6e310ef9a3197a72879 100644 (file)
@@ -53,6 +53,7 @@
  */
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
index 05a56619ece2f72044641c6aa1fd2d99f558bb69..9f7ecbda250c2569daae114e9fae539407d00766 100644 (file)
@@ -793,6 +793,7 @@ static int build_body(struct jit_ctx *ctx)
        const struct sock_filter *inst;
        unsigned int i, off, load_order, condt;
        u32 k, b_off __maybe_unused;
+       int tmp;
 
        for (i = 0; i < prog->len; i++) {
                u16 code;
@@ -1332,9 +1333,9 @@ jmp_cmp:
                case BPF_ANC | SKF_AD_PKTTYPE:
                        ctx->flags |= SEEN_SKB;
 
-                       off = pkt_type_offset();
+                       tmp = off = pkt_type_offset();
 
-                       if (off < 0)
+                       if (tmp < 0)
                                return -1;
                        emit_load_byte(r_tmp, r_skb, off, ctx);
                        /* Keep only the last 3 bits */
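
The new tmp variable exists because off is declared unsigned further up, so the old test off < 0 could never be true even when pkt_type_offset() reported an error. A small sketch of that pitfall, with a hypothetical stub standing in for pkt_type_offset():

#include <stdio.h>

static int pkt_type_offset_stub(void)		/* hypothetical stand-in */
{
	return -1;				/* error case */
}

int main(void)
{
	unsigned int off = pkt_type_offset_stub();
	int tmp = pkt_type_offset_stub();

	printf("off < 0: %d\n", off < 0);	/* always 0: the error is missed */
	printf("tmp < 0: %d\n", tmp < 0);	/* 1: the error is caught */
	return 0;
}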
index 6e75e203092764a0fe42d7211e547af9dd32f35b..1554a6f2a5bb4bf573d532fa35eb8be8deaa1404 100644 (file)
@@ -321,6 +321,22 @@ source "fs/Kconfig"
 
 source "arch/parisc/Kconfig.debug"
 
+config SECCOMP
+       def_bool y
+       prompt "Enable seccomp to safely compute untrusted bytecode"
+       ---help---
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded should say N here.
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
index 7187664034c3499b92c62210d0d347b22ae24adc..5db8882f732c4fe5fae9bc3f2627e9c3b46ab06d 100644 (file)
@@ -48,7 +48,12 @@ cflags-y     := -pipe
 
 # These flags should be implied by an hppa-linux configuration, but they
 # are not in gcc 3.2.
-cflags-y       += -mno-space-regs -mfast-indirect-calls
+cflags-y       += -mno-space-regs
+
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y       += -mfast-indirect-calls
+endif
 
 # Currently we save and restore fpregs on all kernel entry/interruption paths.
 # If that gets optimized, we might need to disable the use of fpregs in the
index 90025322b75ecd3b8a5672b76d18ce534b92ee2f..0490199d7b15b00e476435193f168c894da80538 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_PD6729=m
 CONFIG_I82092=m
 # CONFIG_SUPERIO is not set
 # CONFIG_CHASSIS_LCD_LED is not set
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 8249ac9d9cfcc2a2e74d1111ac4b16ac54e67493..269c23d23fcb6f087f6c3ace905e0c8bbc598e4c 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_PCI_LBA=y
 # CONFIG_PDC_CHASSIS_WARN is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index d9dc6cd3b7d2bb549bd7bc24efd2339a894aad0b..e5c4da03581056efc5630c47b3dacdfdc1980c07 100644 (file)
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
                }
 
                /* String could be altered by userspace after strlen_user() */
-               fsname[len] = '\0';
+               fsname[len - 1] = '\0';
 
                printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
                if ( !strcmp(fsname, "hfs") ) {
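
Writing the terminator at len - 1 keeps it inside a buffer whose last valid index is len - 1; the allocation size is not visible in this hunk, so the sketch below simply assumes the buffer holds len bytes, in which case fsname[len] would land one byte past the end:

#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 8;			/* e.g. a strlen_user()-style length */
	char *fsname = malloc(len);	/* assumed allocation of len bytes */

	if (!fsname)
		return 1;
	memset(fsname, 'x', len);
	fsname[len - 1] = '\0';		/* in bounds: valid indices are 0..len-1 */
	free(fsname);
	return 0;
}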
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
new file mode 100644 (file)
index 0000000..015f788
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_PARISC_SECCOMP_H
+#define _ASM_PARISC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+
+#endif /* _ASM_PARISC_SECCOMP_H */
index 4b9b10ce1f9d09d771beda26c7b48f5eba8ee75a..a84611835549c2f9a1b93e999f593a058fcc8add 100644 (file)
@@ -60,6 +60,7 @@ struct thread_info {
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
 #define TIF_BLOCKSTEP          10      /* branch stepping? */
+#define TIF_SECCOMP            11      /* secure computing */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -70,11 +71,13 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
+#define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |        \
-                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
+                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP)
 
 #ifdef CONFIG_64BIT
 # ifdef CONFIG_COMPAT
index 47e0e21d2272468bbc864e6e30ced36c18929534..8667f18be2385d7423da2efac3803671b500740e 100644 (file)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
 #define __NR_renameat2         (__NR_Linux + 337)
+#define __NR_seccomp           (__NR_Linux + 338)
+#define __NR_getrandom         (__NR_Linux + 339)
+#define __NR_memfd_create      (__NR_Linux + 340)
 
-#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
+#define __NR_Linux_syscalls    (__NR_memfd_create + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e842ee233db44ef902899280fbadd456175a4de3..92438c21d4532e4c55d93adace8da60b081e88cf 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/user.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/seccomp.h>
 #include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
@@ -270,6 +271,9 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
+       /* Do the secure computing check first. */
+       secure_computing_strict(regs->gr[20]);
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                ret = -1L;
index 83878601103701df4497913c89d5702897249a38..7ef22e3387e09f8b63b2a6532b0c995bb12b6e96 100644 (file)
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
        /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
        /* Light-weight-syscall entry must always be located at 0xb0 */
        /* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (2)
+#define __NR_lws_entries (3)
 
 lws_entry:
        gate    lws_start, %r0          /* increase privilege */
@@ -502,7 +502,7 @@ lws_exit:
 
        
        /***************************************************
-               Implementing CAS as an atomic operation:
+               Implementing 32bit CAS as an atomic operation:
 
                %r26 - Address to examine
                %r25 - Old value to check (old)
@@ -659,6 +659,230 @@ cas_action:
        ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
+       /***************************************************
+               New CAS implementation which uses pointers and variable size
+               New CAS implementation which uses pointers and variable size
+               information. The values pointed to by old and new MUST NOT change
+               while the CAS is performed. The lock only protects the value at %r26.
+               %r26 - Address to examine
+               %r25 - Pointer to the value to check (old)
+               %r24 - Pointer to the value to set (new)
+               %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+               %r28 - Return non-zero on failure
+               %r21 - Kernel error code
+
+               %r21 has the following meanings:
+
+               EAGAIN - CAS is busy, ldcw failed, try again.
+               EFAULT - Read or write failed.
+
+               Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+       ****************************************************/
+
+       /* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+       /* Clip the input registers */
+       depdi   0, 31, 32, %r26
+       depdi   0, 31, 32, %r25
+       depdi   0, 31, 32, %r24
+       depdi   0, 31, 32, %r23
+#endif
+
+       /* Check the validity of the size argument */
+       subi,>>= 4, %r23, %r0
+       b,n     lws_exit_nosys
+
+       /* Jump to the functions which will load the old and new values into
+          registers depending on their size */
+       shlw    %r23, 2, %r29
+       blr     %r29, %r0
+       nop
+
+       /* 8bit load */
+4:     ldb     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+5:     ldb     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 16bit load */
+6:     ldh     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+7:     ldh     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 32bit load */
+8:     ldw     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+9:     ldw     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 64bit load */
+#ifdef CONFIG_64BIT
+10:    ldd     0(%sr3,%r25), %r25
+11:    ldd     0(%sr3,%r24), %r24
+#else
+       /* Load old value into r22/r23 - high/low */
+10:    ldw     0(%sr3,%r25), %r22
+11:    ldw     4(%sr3,%r25), %r23
+       /* Load new value into fr4 for atomic store later */
+12:    flddx   0(%sr3,%r24), %fr4
+#endif
+
+cas2_lock_start:
+       /* Load start of lock table */
+       ldil    L%lws_lock_start, %r20
+       ldo     R%lws_lock_start(%r20), %r28
+
+       /* Extract four bits from r26 and hash lock (Bits 4-7) */
+       extru  %r26, 27, 4, %r20
+
+       /* Find the lock to use: the hash is one of 0 to 15,
+          multiplied by 16 (to keep it 16-byte aligned) and
+          added to the lock table offset. */
+       shlw    %r20, 4, %r20
+       add     %r20, %r28, %r20
+
+       rsm     PSW_SM_I, %r0                   /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
+       LDCW    0(%sr2,%r20), %r28              /* Try to acquire the lock */
+       cmpb,<>,n       %r0, %r28, cas2_action  /* Did we get it? */
+cas2_wouldblock:
+       ldo     2(%r0), %r28                    /* 2nd case */
+       ssm     PSW_SM_I, %r0
+       b       lws_exit                        /* Contended... */
+       ldo     -EAGAIN(%r0), %r21              /* Spin in userspace */
+
+       /*
+               prev = *addr;
+               if ( prev == old )
+                 *addr = new;
+               return prev;
+       */
+
+       /* NOTES:
+               This all works because intr_do_signal
+               and schedule both check the return iasq
+               and see that we are on the kernel page,
+               so this process is never scheduled off
+               nor ever sent any signal of any sort,
+               thus it is wholly atomic from userspace's
+               perspective
+       */
+cas2_action:
+       /* Jump to the correct function */
+       blr     %r29, %r0
+       /* Set %r28 as non-zero for now */
+       ldo     1(%r0),%r28
+
+       /* 8bit CAS */
+13:    ldb,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+14:    stb,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 16bit CAS */
+15:    ldh,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+16:    sth,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 32bit CAS */
+17:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+18:    stw,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 64bit CAS */
+#ifdef CONFIG_64BIT
+19:    ldd,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+20:    std,ma  %r24, 0(%sr3,%r26)
+       copy    %r0, %r28
+#else
+       /* Compare first word */
+19:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r22, %r0
+       b,n     cas2_end
+       /* Compare second word */
+20:    ldw,ma  4(%sr3,%r26), %r29
+       sub,=   %r29, %r23, %r0
+       b,n     cas2_end
+       /* Perform the store */
+21:    fstdx   %fr4, 0(%sr3,%r26)
+       copy    %r0, %r28
+#endif
+
+cas2_end:
+       /* Free lock */
+       stw,ma  %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
+       /* Return to userspace, set no error */
+       b       lws_exit
+       copy    %r0, %r21
+
+22:
+       /* Error occurred on load or store */
+       /* Free lock */
+       stw     %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
+       ldo     1(%r0),%r28
+       b       lws_exit
+       ldo     -EFAULT(%r0),%r21       /* set errno */
+       nop
+       nop
+       nop
+
+       /* Exception table entries, for the load and store, return EFAULT.
+          Each of the entries must be relocated. */
+       ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+       ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
        /* Make sure nothing else is placed on this page */
        .align PAGE_SIZE
 END(linux_gateway_page)
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
        /* Light-weight-syscall table */
        /* Start of lws table. */
 ENTRY(lws_table)
-       LWS_ENTRY(compare_and_swap32)   /* 0 - ELF32 Atomic compare and swap */
-       LWS_ENTRY(compare_and_swap64)   /* 1 - ELF64 Atomic compare and swap */
+       LWS_ENTRY(compare_and_swap32)           /* 0 - ELF32 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap64)           /* 1 - ELF64 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap_2)           /* 2 - ELF32 Atomic 64bit CAS */
 END(lws_table)
        /* End of lws table */
 
index 84c5d3a58fa189b91e8671c29879d27238ccb160..b563d9c8268b153429a06cfc85924c236d00c688 100644 (file)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
        ENTRY_SAME(renameat2)
+       ENTRY_SAME(seccomp)
+       ENTRY_SAME(getrandom)
+       ENTRY_SAME(memfd_create)        /* 340 */
 
        /* Nothing yet */
 
index 5e2aa43562b5078662b79b26c41165e259761508..59734916986a9e53cd17ec2b1c9a27ec445d3b85 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_PM=y
 CONFIG_PCI_MSI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index 4bee1a6d41d04d0694ee6ad45ab98e9ebfd70b44..45fd06cdc3e86639d3ddc829c4d44c8db0069bd0 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 6d7b22f41b5077a6d0f614fceb58c596856a209d..77d7bf3ca2acb014def979d68d21c7f94401ed10 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 4b07bade1ba99942e88ea9cd4dd251996cbede70..269d6e47c67d2a998272f27545ec238766c68dc8 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=24
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
index 3c72fa615bd9e29e135df83b7944fa48c922db8f..7594c5ac64818f6b01537bb0c0be3519c73bd455 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_BLK_DEV_INITRD=y
index 95e545d9f25c94935c1cee35a9e2b45e7c0169cf..c8b6a9ddb21bf813c448dfc4f847a3f611c23dd1 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_COMPAT_BRK is not set
index cec044a3ff69dd6c05dcfec122fcb53653b412f8..e5e7838af0088f04b8faf5f40d8204962bd0bb6f 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
index 553e6627801071f2c12ef61dfab1245522ea189f..0351b5ffdfef7dc76b69376e8d15c20f01c8e8b4 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_HIBERNATION=y
 CONFIG_APM_EMULATION=y
 CONFIG_PCCARD=m
 CONFIG_YENTA=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index f26b267eb71fa947db544f202e6cd55b581b1821..36518870e6b29f75811b0b7ec4cddcb7859c04b5 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_VSX=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -57,6 +58,7 @@ CONFIG_ELECTRA_CF=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 438e813dc9cb1c18cf4605f5d937135100d39530..c3a3269b08657d5c29208030dd55a978da3a6308 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
@@ -32,6 +33,7 @@ CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_PCI_MSI=y
 CONFIG_PCCARD=y
 CONFIG_HOTPLUG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index fdee37fab81c5aa1f70263e7c4979fc0df7e9956..2e637c881d2b44be41b9db66145ac7863cbe1be1 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
index a905063281cc329b376605fa583d1e3282ce36c2..dd2a9cab4b5012b69c00694c13347f8a52824f9c 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2048
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
@@ -52,6 +53,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 58e3dbf43ca419f6877c7f87ac856e4ace764560..63392f4b29a4f7876210b48a52bff5ba39f2ee77 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
@@ -54,6 +55,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 279b80f3bb293d0bceceecb9eb38ff344e26b8fb..c0c61fa9cd9e4d3440798c9c1c0f93cb9276fcfa 100644 (file)
                                 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER     12
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE   32
+#else
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
+#endif
+
 /* Size of dummy stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     128
 #define __SIGNAL_FRAMESIZE32   64
@@ -60,6 +66,7 @@
 #define STACK_FRAME_REGS_MARKER        ASM_CONST(0x72656773)
 #define STACK_INT_FRAME_SIZE   (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 #define STACK_FRAME_MARKER     2
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
 
 /* Size of stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     64
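
STACK_FRAME_MIN_SIZE exists because the ELFv2 ABI allows a function to allocate as little as 32 bytes of stack, so the valid_next_sp() change later in this diff accepts any candidate frame at least that far above the previous one instead of demanding a full STACK_FRAME_OVERHEAD. A condensed C sketch of that check, with names assumed for illustration:

#include <stdbool.h>

#define STACK_FRAME_MIN_SIZE_SKETCH 32	/* ELFv2 minimum stack frame */

static bool plausible_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return false;		/* frames must stay 16-byte aligned */
	return sp >= prev_sp + STACK_FRAME_MIN_SIZE_SKETCH;
}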
index 542bc0f0673fc8e792d984b8215511cf4b4566b4..7d8a600688058ec5025cfd3688b6eb369cdadea5 100644 (file)
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
 SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
index 5ce5552ab9f5ca24fe479bfd81ea4a915a478a90..4e9af3fd43e7d062755255d932c2123ea5f54c8a 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          358
+#define __NR_syscalls          361
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 2d526f7b48da121f60b2c3c7c069b07199a76ff6..0688fc06e18394268002ae2b7e419f183e6a7dc4 100644 (file)
 #define __NR_sched_setattr     355
 #define __NR_sched_getattr     356
 #define __NR_renameat2         357
+#define __NR_seccomp           358
+#define __NR_getrandom         359
+#define __NR_memfd_create      360
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 74d1e780748b58f17f8987218a6184c87aae63da..2396dda282cdef0ed5c11c6ab7c3f4f479d0ac04 100644 (file)
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
-       if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+       if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
index 97ac8dc33667f1319802fd08a8c1d9d56b8905b5..5e1ed1575aabe23c245edcdff06433cfb0a62327 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/opal.h>
 #include <asm/cputable.h>
+#include <asm/machdep.h>
 
 static int opal_hmi_handler_nb_init;
 struct OpalHmiEvtNode {
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void)
        }
        return 0;
 }
-subsys_initcall(opal_hmi_handler_init);
+machine_subsys_initcall(powernv, opal_hmi_handler_init);
index c904583baf4b94d46e5a0ae3b4f5dca134a4bc87..17ee193960a09aab4e5e4502525f4b55788bdcce 100644 (file)
@@ -113,7 +113,7 @@ out:
 static int pseries_remove_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        pseries_remove_memblock(base, lmb_size);
        return 0;
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np)
 static int pseries_add_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        /*
         * Update memory region to represent the memory add
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
        struct of_drconf_cell *new_drmem, *old_drmem;
        unsigned long memblock_size;
        u32 entries;
-       u32 *p;
+       __be32 *p;
        int i, rc = -EINVAL;
 
        memblock_size = pseries_memory_block_size();
        if (!memblock_size)
                return -EINVAL;
 
-       p = (u32 *) pr->old_prop->value;
+       p = (__be32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;
 
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
        * entries. Get the number of entries and skip to the array of
         * of_drconf_cell's.
         */
-       entries = *p++;
+       entries = be32_to_cpu(*p++);
        old_drmem = (struct of_drconf_cell *)p;
 
-       p = (u32 *)pr->prop->value;
+       p = (__be32 *)pr->prop->value;
        p++;
        new_drmem = (struct of_drconf_cell *)p;
 
        for (i = 0; i < entries; i++) {
-               if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
-                   (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
-                       rc = pseries_remove_memblock(old_drmem[i].base_addr,
+               if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
+                   (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
+                       rc = pseries_remove_memblock(
+                               be64_to_cpu(old_drmem[i].base_addr),
                                                     memblock_size);
                        break;
-               } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
-                          (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
-                       rc = memblock_add(old_drmem[i].base_addr,
+               } else if ((!(be32_to_cpu(old_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) &&
+                           (be32_to_cpu(new_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) {
+                       rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
                                          memblock_size);
                        rc = (rc < 0) ? -EINVAL : 0;
                        break;
                }
        }
-
        return rc;
 }
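
The be32_to_cpu()/be64_to_cpu() conversions above are needed because device-tree property data is big-endian regardless of kernel endianness, so on a little-endian kernel the raw words must be byte-swapped before use. A self-contained sketch of the little-endian case, with a stand-in for the kernel's be32_to_cpu():

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_to_cpu_sketch(uint32_t v)	/* stand-in for be32_to_cpu() */
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

int main(void)
{
	/* A property storing 16 is the byte sequence 00 00 00 10; a raw load
	   on a little-endian CPU therefore reads 0x10000000. */
	uint32_t raw_le_load = 0x10000000u;

	printf("%u\n", be32_to_cpu_sketch(raw_le_load));	/* prints 16 */
	return 0;
}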
 
index 3ca1894ade09e57c4158bc2d6c716da25f49665c..9d94fdd9f525e9503d7f39f0a71c89cacaf53349 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 4830aa6e6f53cdcc45546cf22e86891d555c1c7e..90f514baa37d0b091eda7e04d45d4be45be3cd67 100644 (file)
@@ -61,6 +61,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 61db449bf309c058c7c7a063f2e6eee6fd4472d4..13559d32af696b9917d9c84678beea19f498461f 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 948e0e057a23f84af2b0dbcf5b23f3624cdf508f..e376789f2d8daa6ac407ca2c103d2f49be69ad6f 100644 (file)
@@ -23,6 +23,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_IUCV is not set
+CONFIG_NET=y
 CONFIG_ATM=y
 CONFIG_ATM_LANE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
index 2e56498a40df20683a2eb08dc72f3b48d6216003..fab35a8efa4f924ee2b69a3a3c9e32e73ebaa9a6 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_CMA=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_NET_KEY=y
index 2fcccc0c997cc3102dcb67e5ea2f60fc4f4200b2..c81661e756a0396bb8e90dee214492b154081d91 100644 (file)
 #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_fcp))
 
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
 
 #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_ccw))
 
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
 
 #define IPL_MAX_SUPPORTED_VERSION (0)
 
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
        u8  pbt;
        u8  flags;
        u16 reserved2;
+       u8  loadparm[8];
 } __attribute__((packed));
 
 struct ipl_block_fcp {
-       u8  reserved1[313-1];
+       u8  reserved1[305-1];
        u8  opt;
        u8  reserved2[3];
        u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
                                 offsetof(struct ipl_block_fcp, scp_data)))
 
 struct ipl_block_ccw {
-       u8  load_parm[8];
        u8  reserved1[84];
        u8  reserved2[2];
        u16 devno;
index 22aac5885ba23eca4590d9219aefc1fab34d9325..39badb9ca0b30c6b7b32ce720165e85c3c33319a 100644 (file)
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
 DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
                   IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
 
-static struct attribute *ipl_fcp_attrs[] = {
-       &sys_ipl_type_attr.attr,
-       &sys_ipl_device_attr.attr,
-       &sys_ipl_fcp_wwpn_attr.attr,
-       &sys_ipl_fcp_lun_attr.attr,
-       &sys_ipl_fcp_bootprog_attr.attr,
-       &sys_ipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
-       .attrs = ipl_fcp_attrs,
-};
-
-/* CCW ipl device attributes */
-
 static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *page)
 {
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
        __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
+static struct attribute *ipl_fcp_attrs[] = {
+       &sys_ipl_type_attr.attr,
+       &sys_ipl_device_attr.attr,
+       &sys_ipl_fcp_wwpn_attr.attr,
+       &sys_ipl_fcp_lun_attr.attr,
+       &sys_ipl_fcp_bootprog_attr.attr,
+       &sys_ipl_fcp_br_lba_attr.attr,
+       &sys_ipl_ccw_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+       .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
 static struct attribute *ipl_ccw_attrs_vm[] = {
        &sys_ipl_type_attr.attr,
        &sys_ipl_device_attr.attr,
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
 DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
                   reipl_block_fcp->ipl_info.fcp.devno);
 
-static struct attribute *reipl_fcp_attrs[] = {
-       &sys_reipl_fcp_device_attr.attr,
-       &sys_reipl_fcp_wwpn_attr.attr,
-       &sys_reipl_fcp_lun_attr.attr,
-       &sys_reipl_fcp_bootprog_attr.attr,
-       &sys_reipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group reipl_fcp_attr_group = {
-       .attrs = reipl_fcp_attrs,
-};
-
-/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-       reipl_block_ccw->ipl_info.ccw.devno);
-
 static void reipl_get_ascii_loadparm(char *loadparm,
                                     struct ipl_parameter_block *ibp)
 {
-       memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
        EBCASC(loadparm, LOADPARM_LEN);
        loadparm[LOADPARM_LEN] = 0;
        strim(loadparm);
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
                return -EINVAL;
        }
        /* initialize loadparm with blanks */
-       memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
+       memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
        /* copy and convert to ebcdic */
-       memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
-       ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(ipb->hdr.loadparm, buf, lp_len);
+       ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
        return len;
 }
 
+/* FCP wrapper */
+static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *page)
+{
+       return reipl_generic_loadparm_show(reipl_block_fcp, page);
+}
+
+static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+                                       struct kobj_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+       __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
+                                           reipl_fcp_loadparm_store);
+
+static struct attribute *reipl_fcp_attrs[] = {
+       &sys_reipl_fcp_device_attr.attr,
+       &sys_reipl_fcp_wwpn_attr.attr,
+       &sys_reipl_fcp_lun_attr.attr,
+       &sys_reipl_fcp_bootprog_attr.attr,
+       &sys_reipl_fcp_br_lba_attr.attr,
+       &sys_reipl_fcp_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+       .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+       reipl_block_ccw->ipl_info.ccw.devno);
+
 /* NSS wrapper */
 static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
        /* LOADPARM */
        /* check if read scp info worked and set loadparm */
        if (sclp_ipl_info.is_valid)
-               memcpy(ipb->ipl_info.ccw.load_parm,
-                               &sclp_ipl_info.loadparm, LOADPARM_LEN);
+               memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
        else
                /* read scp info failed: set empty loadparm (EBCDIC blanks) */
-               memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+               memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
        ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
 
        /* VM PARM */
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void)
                return rc;
        }
 
-       if (ipl_info.type == IPL_TYPE_FCP)
+       if (ipl_info.type == IPL_TYPE_FCP) {
                memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
-       else {
+               /*
+                * Fix loadparm: There are systems where the (SCSI) LOADPARM
+                * is invalid in the SCSI IPL parameter block, so take it
+                * always from sclp_ipl_info.
+                */
+               memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+                      LOADPARM_LEN);
+       } else {
                reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
                reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
                reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void)
 
 static int __init s390_ipl_init(void)
 {
+       char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
+
        sclp_get_ipl_info(&sclp_ipl_info);
+       /*
+        * Fix loadparm: There are systems where the (SCSI) LOADPARM
+        * returned by read SCP info is invalid (contains EBCDIC blanks)
+        * when the system has been booted via diag308. In that case we use
+        * the value from diag308, if available.
+        *
+        * There are also systems where diag308 store does not work if
+        * the system is booted from the HMC. Fortunately, in that case
+        * READ SCP info provides the correct value.
+        */
+       if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
+           diag308_set_works)
+               memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
+                      LOADPARM_LEN);
        shutdown_actions_init();
        shutdown_triggers_init();
        return 0;
index 65fc3979c2f11bb037fd7f646c18a1188b4c4553..7cf18f8d4cb4fc9d281169ed8dee675f4c68b6be 100644 (file)
@@ -22,13 +22,11 @@ __kernel_clock_gettime:
        basr    %r5,0
 0:     al      %r5,21f-0b(%r5)                 /* get &_vdso_data */
        chi     %r2,__CLOCK_REALTIME
-       je      10f
+       je      11f
        chi     %r2,__CLOCK_MONOTONIC
        jne     19f
 
        /* CLOCK_MONOTONIC */
-       ltr     %r3,%r3
-       jz      9f                              /* tp == NULL */
 1:     l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     1b
@@ -67,12 +65,10 @@ __kernel_clock_gettime:
        j       6b
 8:     st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-9:     lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-10:    ltr     %r3,%r3                         /* tp == NULL */
-       jz      18f
 11:    l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     11b
@@ -111,7 +107,7 @@ __kernel_clock_gettime:
        j       15b
 17:    st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-18:    lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* Fallback to system call */
index 91940ed33a4ab21686f890481545cbfdd9404468..3f34e09db5f4d4d0e5f7a12b9e3acb89a72a6773 100644 (file)
@@ -21,7 +21,7 @@ __kernel_clock_gettime:
        .cfi_startproc
        larl    %r5,_vdso_data
        cghi    %r2,__CLOCK_REALTIME
-       je      4f
+       je      5f
        cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
        je      9f
        cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
@@ -30,8 +30,6 @@ __kernel_clock_gettime:
        jne     12f
 
        /* CLOCK_MONOTONIC */
-       ltgr    %r3,%r3
-       jz      3f                              /* tp == NULL */
 0:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     0b
@@ -53,12 +51,10 @@ __kernel_clock_gettime:
        j       1b
 2:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-3:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-4:     ltr     %r3,%r3                         /* tp == NULL */
-       jz      8f
 5:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     5b
@@ -80,7 +76,7 @@ __kernel_clock_gettime:
        j       6b
 7:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-8:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_THREAD_CPUTIME_ID for this thread */
index 0c1073ed1e84c42577766c87aca3cd878f21c06e..c7235e01fd674999cc89920be9d03461b7cd4d13 100644 (file)
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
index 6a96b9a2f7a557d564e6aa79684c830cfffc6520..bbd4c2298708f8202433c4da43ffe4522813e941 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_PCI_DEBUG=y
 CONFIG_PCCARD=y
 CONFIG_YENTA=y
 CONFIG_HOTPLUG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index e741b1e36acd69d694fb793279c1c8a395f17163..df25ae774ee0d2a3fcf46448413393b673b623d9 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_CMDLINE_OVERWRITE=y
 CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1"
 CONFIG_PCCARD=y
 CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index bf8daf9d9c9b9b02b2c7e7a97e2a1f958b38708f..37458f38b22093d9bf1b6468577fb39de3f54013 100644 (file)
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
+               __flush_anon_page(page, addr);
+               flush_dcache_page(page);
                pages[*nr] = page;
                (*nr)++;
 
index 9d8521b8c854e97dbc922092bb7472a35e0a725c..6b68f12f29db4615fe36569b0e4d9d9fe30229be 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_SUN_OPENPROMFS=m
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 9d016c7017f718f2ea0c78e7babcddfd6e24b591..8c83f4b8eb15a7c314c10f9b44963b2220672d25 100644 (file)
@@ -6,10 +6,12 @@
 #define SAVE_SZ                176
 #define SCRATCH_OFF    STACK_BIAS + 128
 #define BE_PTR(label)  be,pn %xcc, label
+#define SIGN_EXTEND(reg)       sra reg, 0, reg
 #else
 #define SAVE_SZ                96
 #define SCRATCH_OFF    72
 #define BE_PTR(label)  be label
+#define SIGN_EXTEND(reg)
 #endif
 
 #define SKF_MAX_NEG_OFF        (-0x200000) /* SKF_LL_OFF from filter.h */
@@ -135,6 +137,7 @@ bpf_slow_path_byte_msh:
        save    %sp, -SAVE_SZ, %sp;                     \
        mov     %i0, %o0;                               \
        mov     r_OFF, %o1;                             \
+       SIGN_EXTEND(%o1);                               \
        call    bpf_internal_load_pointer_neg_helper;   \
         mov    (LEN), %o2;                             \
        mov     %o0, r_TMP;                             \
index 1f76c22a6a75d64a5dafd6f24da19eec6c816e96..ece4af0575e9ac98f23f496396b929479987786e 100644 (file)
@@ -184,7 +184,7 @@ do {                                                                \
         */
 #define emit_alu_K(OPCODE, K)                                  \
 do {                                                           \
-       if (K) {                                                \
+       if (K || OPCODE == AND || OPCODE == MUL) {              \
                unsigned int _insn = OPCODE;                    \
                _insn |= RS1(r_A) | RD(r_A);                    \
                if (is_simm13(K)) {                             \
@@ -234,12 +234,18 @@ do {      BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));        \
        __emit_load8(BASE, STRUCT, FIELD, DEST);                        \
 } while (0)
 
-#define emit_ldmem(OFF, DEST)                                  \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);     \
+#ifdef CONFIG_SPARC64
+#define BIAS (STACK_BIAS - 4)
+#else
+#define BIAS (-4)
+#endif
+
+#define emit_ldmem(OFF, DEST)                                          \
+do {   *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST);       \
 } while (0)
 
-#define emit_stmem(OFF, SRC)                                   \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC);      \
+#define emit_stmem(OFF, SRC)                                           \
+do {   *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC);        \
 } while (0)
 
 #ifdef CONFIG_SMP
@@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
                        case BPF_ANC | SKF_AD_VLAN_TAG:
                        case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                                emit_skb_load16(vlan_tci, r_A);
-                               if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
-                                       emit_andi(r_A, VLAN_VID_MASK, r_A);
+                               if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                                       emit_alu_K(SRL, 12);
+                                       emit_andi(r_A, 1, r_A);
                                } else {
-                                       emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
+                                       emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
                                        emit_and(r_A, r_TMP, r_A);
                                }
                                break;
@@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp)
                                emit_loadimm(K, r_X);
                                break;
                        case BPF_LD | BPF_MEM:
+                               seen |= SEEN_MEM;
                                emit_ldmem(K * 4, r_A);
                                break;
                        case BPF_LDX | BPF_MEM:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_ldmem(K * 4, r_X);
                                break;
                        case BPF_ST:
+                               seen |= SEEN_MEM;
                                emit_stmem(K * 4, r_A);
                                break;
                        case BPF_STX:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_stmem(K * 4, r_X);
                                break;
 
index 778178f4c7d132c15f4f7700b0ad31a20ea7d24b..36327438caf0daffe826df5f4ad992885dd68cd8 100644 (file)
@@ -23,6 +23,7 @@ config X86
        def_bool y
        select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select HAVE_AOUT if X86_32
index 7a801a310e3740fd943ba3456aec57548a1301dc..0fcd9133790c566423b313fccef95a0768bd0d07 100644 (file)
@@ -33,8 +33,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
 ifeq ($(CONFIG_EFI_STUB), y)
-       VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
-                               $(objtree)/drivers/firmware/efi/libstub/lib.a
+       VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
 endif
 
 $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE
index fc6091abedb7ff363da950a0766f45f9834cf1df..d39189ba7f8e5f03397599814ae22f7f79d7a03a 100644 (file)
@@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 static bool mem_avoid_overlap(struct mem_vector *img)
 {
        int i;
+       struct setup_data *ptr;
 
        for (i = 0; i < MEM_AVOID_MAX; i++) {
                if (mem_overlaps(img, &mem_avoid[i]))
                        return true;
        }
 
+       /* Avoid all entries in the setup_data linked list. */
+       ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
+       while (ptr) {
+               struct mem_vector avoid;
+
+               avoid.start = (u64)ptr;
+               avoid.size = sizeof(*ptr) + ptr->len;
+
+               if (mem_overlaps(img, &avoid))
+                       return true;
+
+               ptr = (struct setup_data *)(unsigned long)ptr->next;
+       }
+
        return false;
 }
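
The new loop in mem_avoid_overlap() walks the setup_data singly linked list and treats each node (header plus payload) as a region the randomized kernel must not land on. A rough user-space sketch of that pattern follows; the struct and function names are hypothetical stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

struct record {			/* stand-in for struct setup_data */
	uint64_t next;		/* address of the next record, 0 = end of list */
	uint32_t len;		/* payload length that follows the header */
};

struct region {
	uint64_t start;
	uint64_t size;
};

static bool overlaps(const struct region *a, const struct region *b)
{
	return a->start < b->start + b->size && b->start < a->start + a->size;
}

static bool hits_record_list(const struct region *candidate, uint64_t head)
{
	const struct record *r = (const struct record *)(uintptr_t)head;

	while (r) {
		struct region avoid = {
			.start = (uint64_t)(uintptr_t)r,
			.size  = sizeof(*r) + r->len,	/* header + payload */
		};

		if (overlaps(candidate, &avoid))
			return true;

		r = (const struct record *)(uintptr_t)r->next;
	}
	return false;
}
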
 
index f277184e2ac1356686d9b200bccf54bdf7bf7f9f..de8eebd6f67c825b3f013f7bc20a228b807c181f 100644 (file)
 
 static efi_system_table_t *sys_table;
 
-struct efi_config *efi_early;
+static struct efi_config *efi_early;
+
+#define efi_call_early(f, ...)                                         \
+       efi_early->call(efi_early->f, __VA_ARGS__);
 
 #define BOOT_SERVICES(bits)                                            \
 static void setup_boot_services##bits(struct efi_config *c)            \
@@ -265,21 +268,25 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 
                offset = offsetof(typeof(*out), output_string);
                output_string = efi_early->text_output + offset;
+               out = (typeof(out))(unsigned long)efi_early->text_output;
                func = (u64 *)output_string;
 
-               efi_early->call(*func, efi_early->text_output, str);
+               efi_early->call(*func, out, str);
        } else {
                struct efi_simple_text_output_protocol_32 *out;
                u32 *func;
 
                offset = offsetof(typeof(*out), output_string);
                output_string = efi_early->text_output + offset;
+               out = (typeof(out))(unsigned long)efi_early->text_output;
                func = (u32 *)output_string;
 
-               efi_early->call(*func, efi_early->text_output, str);
+               efi_early->call(*func, out, str);
        }
 }
 
+#include "../../../../drivers/firmware/efi/libstub/efi-stub-helper.c"
+
 static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 {
        u8 first, len;
@@ -360,7 +367,7 @@ free_struct:
        return status;
 }
 
-static efi_status_t
+static void
 setup_efi_pci32(struct boot_params *params, void **pci_handle,
                unsigned long size)
 {
@@ -403,8 +410,6 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle,
                data = (struct setup_data *)rom;
 
        }
-
-       return status;
 }
 
 static efi_status_t
@@ -463,7 +468,7 @@ free_struct:
 
 }
 
-static efi_status_t
+static void
 setup_efi_pci64(struct boot_params *params, void **pci_handle,
                unsigned long size)
 {
@@ -506,11 +511,18 @@ setup_efi_pci64(struct boot_params *params, void **pci_handle,
                data = (struct setup_data *)rom;
 
        }
-
-       return status;
 }
 
-static efi_status_t setup_efi_pci(struct boot_params *params)
+/*
+ * There's no way to return an informative status from this function,
+ * because any analysis (and printing of error messages) needs to be
+ * done directly at the EFI function call-site.
+ *
+ * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we
+ * just didn't find any PCI devices, but there's no way to tell outside
+ * the context of the call.
+ */
+static void setup_efi_pci(struct boot_params *params)
 {
        efi_status_t status;
        void **pci_handle = NULL;
@@ -527,7 +539,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                                        size, (void **)&pci_handle);
 
                if (status != EFI_SUCCESS)
-                       return status;
+                       return;
 
                status = efi_call_early(locate_handle,
                                        EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -538,13 +550,12 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                goto free_handle;
 
        if (efi_early->is64)
-               status = setup_efi_pci64(params, pci_handle, size);
+               setup_efi_pci64(params, pci_handle, size);
        else
-               status = setup_efi_pci32(params, pci_handle, size);
+               setup_efi_pci32(params, pci_handle, size);
 
 free_handle:
        efi_call_early(free_pool, pci_handle);
-       return status;
 }
 
 static void
@@ -1032,7 +1043,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
        int i;
        unsigned long ramdisk_addr;
        unsigned long ramdisk_size;
-       unsigned long initrd_addr_max;
 
        efi_early = c;
        sys_table = (efi_system_table_t *)(unsigned long)efi_early->table;
@@ -1095,15 +1105,20 @@ struct boot_params *make_boot_params(struct efi_config *c)
 
        memset(sdt, 0, sizeof(*sdt));
 
-       if (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)
-               initrd_addr_max = -1UL;
-       else
-               initrd_addr_max = hdr->initrd_addr_max;
-
        status = handle_cmdline_files(sys_table, image,
                                      (char *)(unsigned long)hdr->cmd_line_ptr,
-                                     "initrd=", initrd_addr_max,
+                                     "initrd=", hdr->initrd_addr_max,
                                      &ramdisk_addr, &ramdisk_size);
+
+       if (status != EFI_SUCCESS &&
+           hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) {
+               efi_printk(sys_table, "Trying to load files to higher address\n");
+               status = handle_cmdline_files(sys_table, image,
+                                     (char *)(unsigned long)hdr->cmd_line_ptr,
+                                     "initrd=", -1UL,
+                                     &ramdisk_addr, &ramdisk_size);
+       }
+
        if (status != EFI_SUCCESS)
                goto fail2;
        hdr->ramdisk_image = ramdisk_addr & 0xffffffff;
@@ -1376,10 +1391,7 @@ struct boot_params *efi_main(struct efi_config *c,
 
        setup_graphics(boot_params);
 
-       status = setup_efi_pci(boot_params);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "setup_efi_pci() failed!\n");
-       }
+       setup_efi_pci(boot_params);
 
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                sizeof(*gdt), (void **)&gdt);
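
The make_boot_params() change above replaces the single up-front choice of address limit with a try-then-retry pattern: load below hdr->initrd_addr_max first, and only retry with no limit when that fails and XLF_CAN_BE_LOADED_ABOVE_4G is set. A sketch of the same pattern with an assumed loader callback (not the EFI stub API):

#include <stdbool.h>
#include <stdint.h>

typedef int status_t;
#define STATUS_OK 0

/* Hypothetical loader: place a file below addr_max, report where and how big. */
typedef status_t (*load_fn)(uint64_t addr_max, uint64_t *addr, uint64_t *size);

static status_t load_with_fallback(load_fn load, uint64_t addr_max,
				   bool can_load_above_4g,
				   uint64_t *addr, uint64_t *size)
{
	status_t status = load(addr_max, addr, size);

	/* Retry without an upper bound only if the image allows it. */
	if (status != STATUS_OK && can_load_above_4g)
		status = load(UINT64_MAX, addr, size);

	return status;
}
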
index d487e727f1ec7347ea960b038428998d43a1d6bb..c88c31ecad1231bb2fc45257a8d98bafe7bc519b 100644 (file)
@@ -103,4 +103,20 @@ struct efi_uga_draw_protocol {
        void *blt;
 };
 
+struct efi_config {
+       u64 image_handle;
+       u64 table;
+       u64 allocate_pool;
+       u64 allocate_pages;
+       u64 get_memory_map;
+       u64 free_pool;
+       u64 free_pages;
+       u64 locate_handle;
+       u64 handle_protocol;
+       u64 exit_boot_services;
+       u64 text_output;
+       efi_status_t (*call)(unsigned long, ...);
+       bool is64;
+} __packed;
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index 888950f29fd90f09574db838251725c49e28934a..a7ccd57f19e45531744eeab841c5ac7e41101074 100644 (file)
@@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
+#if 0  /* temporarily disabled due to failing crypto tests */
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
 {
@@ -1522,7 +1522,7 @@ static int __init aesni_init(void)
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
+#if 0  /* temporarily disabled due to failing crypto tests */
        if (cpu_has_avx) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
index afcd35d331de6348071f65ab52e373353e225786..cfe3b954d5e41cbd96be1fdb147c164b63581814 100644 (file)
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
 
 #include <asm-generic/bitops/sched.h>
 
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
 #include <asm/arch_hweight.h>
 
 #include <asm-generic/bitops/const_hweight.h>
index 044a2fd3c5fe78647d4caf9ef6f92136c499ee38..0ec241ede5a256327f9a578ac2d61d0def83b2c5 100644 (file)
@@ -159,30 +159,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map(
 }
 #endif /* CONFIG_EFI_MIXED */
 
-
-/* arch specific definitions used by the stub code */
-
-struct efi_config {
-       u64 image_handle;
-       u64 table;
-       u64 allocate_pool;
-       u64 allocate_pages;
-       u64 get_memory_map;
-       u64 free_pool;
-       u64 free_pages;
-       u64 locate_handle;
-       u64 handle_protocol;
-       u64 exit_boot_services;
-       u64 text_output;
-       efi_status_t (*call)(unsigned long, ...);
-       bool is64;
-} __packed;
-
-extern struct efi_config *efi_early;
-
-#define efi_call_early(f, ...)                                         \
-       efi_early->call(efi_early->f, __VA_ARGS__);
-
 extern bool efi_reboot_required(void);
 
 #else
index b0910f97a3eaaae432728c2d48f2cc09b3cc0b94..ffb1733ac91f21d537951dcf92d2200b4058ae11 100644 (file)
@@ -106,14 +106,14 @@ enum fixed_addresses {
        __end_of_permanent_fixed_addresses,
 
        /*
-        * 256 temporary boot-time mappings, used by early_ioremap(),
+        * 512 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
-        * If necessary we round it up to the next 256 pages boundary so
+        * If necessary we round it up to the next 512 pages boundary so
         * that we can have a single pgd entry and a single pte table:
         */
 #define NR_FIX_BTMAPS          64
-#define FIX_BTMAPS_SLOTS       4
+#define FIX_BTMAPS_SLOTS       8
 #define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
        FIX_BTMAP_END =
         (__end_of_permanent_fixed_addresses ^
index 478c490f36547930e7101c4ad051292810fb04db..1733ab49ac5e3f878eb7c3ad69bf23df068ea258 100644 (file)
@@ -239,6 +239,7 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
 static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
 static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
 static inline void mp_unmap_irq(int irq) { }
+static inline bool mp_should_keep_irq(struct device *dev) { return 1; }
 
 static inline int save_ioapic_entries(void)
 {
index 5be9063545d278bd978d8c14e67fb7be08cc937b..3874693c0e53adc3593a21374154e99941944703 100644 (file)
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
index f304773285ae360810e4290b67aa4c6f0e832ef0..f1314d0bcf0ab64c7ff65099b9b3dd835f192c72 100644 (file)
@@ -338,8 +338,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
         * a relative jump.
         */
        rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
-       if (abs(rel) > 0x7fffffff)
+       if (abs(rel) > 0x7fffffff) {
+               __arch_remove_optimized_kprobe(op, 0);
                return -ERANGE;
+       }
 
        buf = (u8 *)op->optinsn.insn;
 
index 2d872e08fab95f10e8b97c23b54ba1e886680369..42a2dca984b3d1816522684d97c8ac50d86193af 100644 (file)
@@ -1284,6 +1284,9 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
+               cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
+       cpumask_clear(cpu_llc_shared_mask(cpu));
        cpumask_clear(cpu_sibling_mask(cpu));
        cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
index 167ffcac16ed1912d0c5b10761edce535909d8a6..95a427e57887a664af5f1f7d01df90a7a9dd7d34 100644 (file)
@@ -48,7 +48,9 @@ enum address_markers_idx {
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
+# ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
+# endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
@@ -71,7 +73,9 @@ static struct addr_marker address_markers[] = {
        { PAGE_OFFSET,          "Low Kernel Mapping" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMEMMAP_START,        "Vmemmap" },
+# ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
+# endif
        { __START_KERNEL_map,   "High Kernel Mapping" },
        { MODULES_VADDR,        "Modules" },
        { MODULES_END,          "End Modules" },
index 25e7e1372bb26e961b580c753407edf28a320aa3..919b91205cd4be57760c50956eddb2d02dc13c45 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/sched.h>
 #include <asm/elf.h>
 
-struct __read_mostly va_alignment va_align = {
+struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
 
index c61ea57d1ba1d9e8124a039d6aec59d1163c1c75..9a2b7101ae8af6d402f15c66831bcd15b7790325 100644 (file)
@@ -326,27 +326,6 @@ static void pci_fixup_video(struct pci_dev *pdev)
        struct pci_bus *bus;
        u16 config;
 
-       if (!vga_default_device()) {
-               resource_size_t start, end;
-               int i;
-
-               /* Does firmware framebuffer belong to us? */
-               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
-                               continue;
-
-                       start = pci_resource_start(pdev, i);
-                       end  = pci_resource_end(pdev, i);
-
-                       if (!start || !end)
-                               continue;
-
-                       if (screen_info.lfb_base >= start &&
-                           (screen_info.lfb_base + screen_info.lfb_size) < end)
-                               vga_set_default_device(pdev);
-               }
-       }
-
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
@@ -371,8 +350,7 @@ static void pci_fixup_video(struct pci_dev *pdev)
                pci_read_config_word(pdev, PCI_COMMAND, &config);
                if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
                        pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
-                       vga_set_default_device(pdev);
+                       dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n");
                }
        }
 }
index e8a1201c3293bf074bedfaa7243f0e670064d457..16fb0099b7f295ed40c206e25478037f0b2ff651 100644 (file)
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
  *
  * We can construct this by grafting the Xen provided pagetable into
  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working.  We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up.  NOTE: for
+ * PVH, the page tables are native.
  */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                /* L3_i[0] -> level2_ident_pgt */
                convert_pfn_mfn(level3_ident_pgt);
                /* L3_k[510] -> level2_kernel_pgt
-                * L3_i[511] -> level2_fixmap_pgt */
+                * L3_k[511] -> level2_fixmap_pgt */
                convert_pfn_mfn(level3_kernel_pgt);
+
+               /* L3_k[511][506] -> level1_fixmap_pgt */
+               convert_pfn_mfn(level2_fixmap_pgt);
        }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        addr[1] = (unsigned long)l3;
        addr[2] = (unsigned long)l2;
        /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
-        * Both L4[272][0] and L4[511][511] have entries that point to the same
+        * Both L4[272][0] and L4[511][510] have entries that point to the same
         * L2 (PMD) tables. Meaning that if you modify it in __va space
         * it will be also modified in the __ka space! (But if you just
         * modify the PMD table to point to other PTE's or none, then you
         * are OK - which is what cleanup_highmap does) */
        copy_page(level2_ident_pgt, l2);
-       /* Graft it onto L4[511][511] */
+       /* Graft it onto L4[511][510] */
        copy_page(level2_kernel_pgt, l2);
 
-       /* Get [511][510] and graft that in level2_fixmap_pgt */
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       copy_page(level2_fixmap_pgt, l2);
-       /* Note that we don't do anything with level1_fixmap_pgt which
-        * we don't need. */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* Make pagetable pieces RO */
                set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
 
                /* Pin down new L4 */
                pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
index f4d27b12c90b2778182cc3c0cf5a6cd09e2b0310..9924725fa50dcac5563d2654a24df0d85cfada7a 100644 (file)
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        bool is_pm_resume;
 
        WARN_ON(irqs_disabled());
+       WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
index 54535831f1e197a6a6c56a5cc8a4b08e90d4020b..77881798f7930bd4c1df4ea3cc4a0ab3f30ddc80 100644 (file)
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-                                            struct bio *bio)
+                                            struct bio *bio,
+                                            bool no_sg_merge)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, high, highprv = 1, no_sg_merge;
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
-       no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
        high = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+       bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+                       &rq->q->queue_flags);
+
+       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+                       no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                       bio->bi_vcnt < queue_max_segments(q))
                bio->bi_phys_segments = bio->bi_vcnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
-               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }
 
index 4aac82615a46fd2363d03d28007fc5cf6be53929..df8e1e09dd172d9ab67dbd0a91e519c5c01919ef 100644 (file)
@@ -119,7 +119,16 @@ void blk_mq_freeze_queue(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (freeze) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               /*
+                * XXX: Temporary kludge to work around SCSI blk-mq stall.
+                * SCSI synchronously creates and destroys many queues
+                * back-to-back during probe leading to lengthy stalls.
+                * This will be fixed by keeping ->mq_usage_counter in
+                * atomic mode until genhd registration, but, for now,
+                * let's work around using expedited synchronization.
+                */
+               __percpu_ref_kill_expedited(&q->mq_usage_counter);
+
                blk_mq_run_queues(q, false);
        }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
@@ -203,7 +212,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];
 
-               rq->cmd_flags = 0;
                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
@@ -258,6 +266,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
+       rq->cmd_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -392,6 +401,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 
        blk_add_timer(rq);
 
+       /*
+        * Ensure that ->deadline is visible before the started flag is set
+        * and the completed flag is cleared.
+        */
+       smp_mb__before_atomic();
+
        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
@@ -473,7 +488,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
                blk_mq_insert_request(rq, false, false, false);
        }
 
-       blk_mq_run_queues(q, false);
+       /*
+        * Use the start variant of queue running here, so that running
+        * the requeue work will kick stopped queues.
+        */
+       blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -957,14 +976,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-           !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-               blk_insert_flush(rq);
-       } else {
-               spin_lock(&ctx->lock);
-               __blk_mq_insert_request(hctx, rq, at_head);
-               spin_unlock(&ctx->lock);
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, at_head);
+       spin_unlock(&ctx->lock);
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
@@ -1321,6 +1335,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
+                       tags->rqs[i] = NULL;
                }
        }
 
@@ -1354,8 +1369,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
        INIT_LIST_HEAD(&tags->page_list);
 
-       tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                       GFP_KERNEL, set->numa_node);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
@@ -1379,8 +1395,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                        this_order--;
 
                do {
-                       page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                               this_order);
+                       page = alloc_pages_node(set->numa_node,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                               this_order);
                        if (page)
                                break;
                        if (!this_order--)
@@ -1401,11 +1418,15 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
+                       tags->rqs[i]->atomic_flags = 0;
+                       tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
-                                               set->numa_node))
+                                               set->numa_node)) {
+                                       tags->rqs[i] = NULL;
                                        goto fail;
+                               }
                        }
 
                        p += rq_size;
@@ -1416,7 +1437,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        return tags;
 
 fail:
-       pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
 }
@@ -1936,6 +1956,60 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       int i;
+
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               set->tags[i] = blk_mq_init_rq_map(set, i);
+               if (!set->tags[i])
+                       goto out_unwind;
+       }
+
+       return 0;
+
+out_unwind:
+       while (--i >= 0)
+               blk_mq_free_rq_map(set, set->tags[i], i);
+
+       return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       unsigned int depth;
+       int err;
+
+       depth = set->queue_depth;
+       do {
+               err = __blk_mq_alloc_rq_maps(set);
+               if (!err)
+                       break;
+
+               set->queue_depth >>= 1;
+               if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                       err = -ENOMEM;
+                       break;
+               }
+       } while (set->queue_depth);
+
+       if (!set->queue_depth || err) {
+               pr_err("blk-mq: failed to allocate request map\n");
+               return -ENOMEM;
+       }
+
+       if (depth != set->queue_depth)
+               pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                               depth, set->queue_depth);
+
+       return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2018,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-       int i;
-
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
@@ -1966,23 +2038,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
-               goto out;
+               return -ENOMEM;
 
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               set->tags[i] = blk_mq_init_rq_map(set, i);
-               if (!set->tags[i])
-                       goto out_unwind;
-       }
+       if (blk_mq_alloc_rq_maps(set))
+               goto enomem;
 
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
 
        return 0;
-
-out_unwind:
-       while (--i >= 0)
-               blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+       kfree(set->tags);
+       set->tags = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2064,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        }
 
        kfree(set->tags);
+       set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
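
blk_mq_alloc_rq_maps() above implements a back-off: allocate at the requested queue depth, and on failure keep halving the depth until the allocation succeeds or the depth falls below the minimum. A standalone sketch of that back-off loop, with a placeholder allocator and a made-up MIN_DEPTH standing in for reserved_tags + BLK_MQ_TAG_MIN:

#include <stdio.h>
#include <stdlib.h>

#define MIN_DEPTH 4	/* placeholder minimum usable depth */

static void *alloc_maps(unsigned int depth)
{
	/* Stand-in for the real per-hw-queue request map allocation. */
	return calloc(depth, 64);
}

static void *alloc_with_backoff(unsigned int *depth)
{
	unsigned int wanted = *depth;
	void *maps;

	for (;;) {
		maps = alloc_maps(*depth);
		if (maps)
			break;
		*depth >>= 1;			/* halve and retry */
		if (*depth < MIN_DEPTH)
			return NULL;		/* give up: too shallow */
	}

	if (*depth != wanted)
		fprintf(stderr, "reduced depth (%u -> %u)\n", wanted, *depth);

	return maps;
}
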
 
index 4db5abf96b9ec1595a822efebfb9df9776af6477..17f5c84ce7bfb588a5a34bc244a7ac38067b4391 100644 (file)
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
         * Initialization must be complete by now.  Finish the initial
         * bypass from queue allocation.
         */
-       queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
-       blk_queue_bypass_end(q);
+       if (!blk_queue_init_done(q)) {
+               queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               blk_queue_bypass_end(q);
+       }
 
        ret = blk_trace_init_sysfs(dev);
        if (ret)
index 791f419431322882a915f88995df7b72552d5507..e6723bd4d7a16963da3367a1d4791c6402e78a82 100644 (file)
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 
-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        }
 
        /* allocate ext devt */
-       mutex_lock(&ext_devt_mutex);
-       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-       mutex_unlock(&ext_devt_mutex);
+       idr_preload(GFP_KERNEL);
+
+       spin_lock(&ext_devt_lock);
+       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+       spin_unlock(&ext_devt_lock);
+
+       idr_preload_end();
        if (idx < 0)
                return idx == -ENOSPC ? -EBUSY : idx;
 
@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
  */
 void blk_free_devt(dev_t devt)
 {
-       might_sleep();
-
        if (devt == MKDEV(0, 0))
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 }
 
@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
-       blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 
        return disk;
@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
+       blk_free_devt(dev->devt);
        disk_release_events(disk);
        kfree(disk->random);
        disk_replace_part_tbl(disk, NULL);
index 789cdea05893bb8e3420ede5d1aa65e7af408e1b..0d9e5f97f0a8afaa397530a77684bacc7b22ab26 100644 (file)
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
        struct hd_struct *p = dev_to_part(dev);
+       blk_free_devt(dev->devt);
        free_part_stats(p);
        free_part_info(p);
        kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
        rcu_assign_pointer(ptbl->last_lookup, NULL);
        kobject_put(part->holder_dir);
        device_del(part_to_dev(part));
-       blk_free_devt(part_devt(part));
 
        hd_struct_put(part);
 }
index 7894db9ca90b1e13c742841e41b309b39584e622..a53ee099e28131f198aa48772f68b605cfdf6fc3 100644 (file)
@@ -1922,9 +1922,6 @@ static inline int __init drbg_healthcheck_sanity(void)
        /* overflow max addtllen with personalization string */
        ret = drbg_instantiate(drbg, &addtl, coreref, pr);
        BUG_ON(0 == ret);
-       /* test uninstantated DRBG */
-       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
-       BUG_ON(0 < len);
        /* all tests passed */
        rc = 0;
 
index 2da8660262e5ccefa00e7608958adaf8c7328e9a..81dc75033f1590111d488216302f475c42b42137 100644 (file)
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
                      void *handler_context, void *region_context)
 {
        int i;
-       u8 *value = (u8 *)&value64;
+       u8 *value = (u8 *)value64;
 
        if (address > 0xff || !value64)
                return AE_BAD_PARAMETER;
index 9dfec48dd4e503b82994e50d2ae7d06768f84d61..b0ea767c86968ceac331294a6f74694c486f39c2 100644 (file)
@@ -419,7 +419,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev);
        if (!IS_ERR_OR_NULL(pdev)) {
-               device_enable_async_suspend(&pdev->dev);
                return 1;
        }
 
@@ -610,7 +609,7 @@ static int acpi_lpss_suspend_late(struct device *dev)
        return acpi_dev_suspend_late(dev);
 }
 
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
 {
        int ret = acpi_dev_resume_early(dev);
 
@@ -650,15 +649,15 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 static struct dev_pm_domain acpi_lpss_pm_domain = {
        .ops = {
 #ifdef CONFIG_PM_SLEEP
-               .suspend_late = acpi_lpss_suspend_late,
-               .restore_early = acpi_lpss_restore_early,
                .prepare = acpi_subsys_prepare,
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
-               .resume_early = acpi_subsys_resume_early,
+               .suspend_late = acpi_lpss_suspend_late,
+               .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .poweroff = acpi_subsys_suspend,
-               .poweroff_late = acpi_subsys_suspend_late,
+               .poweroff_late = acpi_lpss_suspend_late,
+               .restore_early = acpi_lpss_resume_early,
 #endif
 #ifdef CONFIG_PM_RUNTIME
                .runtime_suspend = acpi_lpss_runtime_suspend,
index 1f9aba5fb81f28bf7f31d4e55d20747b2647d621..2747279fbe3c8e873e02fa9692d79416fb07f729 100644 (file)
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
        u32 field_bit_position;
        u32 field_bit_length;
        u16 resource_length;
+       u16 pin_number_index;
        u8 field_flags;
        u8 attribute;
        u8 field_type;
index 22fb6449d3d61f792ac4777f1f24139b905b08ae..8abb393dafabe621a48e4195fd3ae899eca28b8b 100644 (file)
@@ -264,6 +264,7 @@ struct acpi_object_region_field {
        ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
        union acpi_operand_object *region_obj;  /* Containing op_region object */
        u8 *resource_buffer;    /* resource_template for serial regions/fields */
+       u16 pin_number_index;   /* Index relative to previous Connection/Template */
 };
 
 struct acpi_object_bank_field {
index 3661c8e90540fb8715ec7a24795394bc1609b016..c57666196672016d207805775cfada6875363e10 100644 (file)
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                         */
                        info->resource_buffer = NULL;
                        info->connection_node = NULL;
+                       info->pin_number_index = 0;
 
                        /*
                         * A Connection() is either an actual resource descriptor (buffer)
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        }
 
                        info->field_bit_position += info->field_bit_length;
+                       info->pin_number_index++;       /* Index relative to previous Connection() */
                        break;
 
                default:
index 9957297d15805f2b1691d2b251d89031fd404bb5..8eb8575e8c1623fcd3bc907e180111c0aa68df55 100644 (file)
@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        union acpi_operand_object *region_obj2;
        void *region_context = NULL;
        struct acpi_connection_info *context;
+       acpi_physical_address address;
 
        ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
 
@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        /* We have everything we need, we can invoke the address space handler */
 
        handler = handler_desc->address_space.handler;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
-                         &region_obj->region.handler->address_space, handler,
-                         ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
-                                                 region_offset),
-                         acpi_ut_get_region_name(region_obj->region.
-                                                 space_id)));
+       address = (region_obj->region.address + region_offset);
 
        /*
         * Special handling for generic_serial_bus and general_purpose_io:
         * There are three extra parameters that must be passed to the
         * handler via the context:
-        *   1) Connection buffer, a resource template from Connection() op.
-        *   2) Length of the above buffer.
-        *   3) Actual access length from the access_as() op.
+        *   1) Connection buffer, a resource template from Connection() op
+        *   2) Length of the above buffer
+        *   3) Actual access length from the access_as() op
+        *
+        * In addition, for general_purpose_io, the Address and bit_width fields
+        * are defined as follows:
+        *   1) Address is the pin number index of the field (bit offset from
+        *      the previous Connection)
+        *   2) bit_width is the actual bit length of the field (number of pins)
         */
-       if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
-            (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
            context && field_obj) {
 
                /* Get the Connection (resource_template) buffer */
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                context->length = field_obj->field.resource_length;
                context->access_length = field_obj->field.access_length;
        }
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+           context && field_obj) {
+
+               /* Get the Connection (resource_template) buffer */
+
+               context->connection = field_obj->field.resource_buffer;
+               context->length = field_obj->field.resource_length;
+               context->access_length = field_obj->field.access_length;
+               address = field_obj->field.pin_number_index;
+               bit_width = field_obj->field.bit_length;
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+                         &region_obj->region.handler->address_space, handler,
+                         ACPI_FORMAT_NATIVE_UINT(address),
+                         acpi_ut_get_region_name(region_obj->region.
+                                                 space_id)));
 
        if (!(handler_desc->address_space.handler_flags &
              ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
        /* Call the handler */
 
-       status = handler(function,
-                        (region_obj->region.address + region_offset),
-                        bit_width, value, context,
+       status = handler(function, address, bit_width, value, context,
                         region_obj2->extra.region_context);
 
        if (ACPI_FAILURE(status)) {
index 6907ce0c704c8f9eb4f9c06966930f155495d933..b994845ed359bdb7b15f311ba76c45214112aea4 100644 (file)
@@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
                buffer = &buffer_desc->integer.value;
        }
 
+       if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+           (obj_desc->field.region_obj->region.space_id ==
+            ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), the Address will be the bit offset
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldRead [FROM]:  Pin %u Bits %u\n",
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the read */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_READ);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               if (ACPI_FAILURE(status)) {
+                       acpi_ut_remove_reference(buffer_desc);
+               } else {
+                       *ret_buffer_desc = buffer_desc;
+               }
+               return_ACPI_STATUS(status);
+       }
+
        ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                          "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
                          obj_desc, obj_desc->common.type, buffer,
@@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
                *result_desc = buffer_desc;
                return_ACPI_STATUS(status);
+       } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+                  (obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), we will bypass the entire field
+                * mechanism and handoff the bit address and bit width directly to
+                * mechanism and hand off the bit address and bit width directly to
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+                       return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+               }
+
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
+                                 acpi_ut_get_type_name(source_desc->common.
+                                                       type),
+                                 source_desc->common.type,
+                                 (u32)source_desc->integer.value,
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               buffer = &source_desc->integer.value;
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the write */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_WRITE);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               return_ACPI_STATUS(status);
        }
 
        /* Get a pointer to the data to be written */
index ee3f872870bc77d0d6d599037fbbb7a735982581..118e942005e5d78ea4a98e4c4d0ff3ba5bc9224c 100644 (file)
@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                        obj_desc->field.resource_length = info->resource_length;
                }
 
+               obj_desc->field.pin_number_index = info->pin_number_index;
+
                /* Allow full data read from EC address space */
 
                if ((obj_desc->field.region_obj->region.space_id ==
index 1c162e7be045405f267ef8d8140795508f8ea1c0..5fdfe65fe165d0a95b72e258a6c4584682f7019a 100644 (file)
@@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                        " invalid.\n");
        }
 
-       /*
-        * When fully charged, some batteries wrongly report
-        * capacity_now = design_capacity instead of = full_charge_capacity
-        */
-       if (battery->capacity_now > battery->full_charge_capacity
-           && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
-               if (battery->capacity_now != battery->design_capacity)
-                       printk_once(KERN_WARNING FW_BUG
-                               "battery: reported current charge level (%d) "
-                               "is higher than reported maximum charge level (%d).\n",
-                               battery->capacity_now, battery->full_charge_capacity);
-               battery->capacity_now = battery->full_charge_capacity;
-       }
-
        if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
            && battery->capacity_now >= 0 && battery->capacity_now <= 100)
                battery->capacity_now = (battery->capacity_now *
index 8581f5b84f48b6d2b66fa4b1f7c120f6b0928bf6..8b67bd0f6bb5e629d77de4b18490c0f64e329ad6 100644 (file)
@@ -177,16 +177,6 @@ void acpi_bus_detach_private_data(acpi_handle handle)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
 
-void acpi_bus_no_hotplug(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
-       if (adev)
-               adev->flags.no_hotplug = true;
-}
-EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
-
 static void acpi_print_osc_error(acpi_handle handle,
        struct acpi_osc_context *context, char *error)
 {
index 76f7cff645945ac6bf08788a8d3670286c9750c4..c8ead9f97375342130a43aed57487ead8363133b 100644 (file)
@@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev)
                device_unregister(dev);
 }
 
+static void container_device_online(struct acpi_device *adev)
+{
+       struct device *dev = acpi_driver_data(adev);
+
+       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
 static struct acpi_scan_handler container_handler = {
        .ids = container_device_ids,
        .attach = container_device_attach,
@@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = {
        .hotplug = {
                .enabled = true,
                .demand_offline = true,
+               .notify_online = container_device_online,
        },
 };
 
index 3bf7764659a4f5643557eb2aee4b5b327da4f88f..ae44d8654c8248c73870727098666e2b42dec6f8 100644 (file)
@@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
        list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
                count = snprintf(&modalias[len], size, "%s:", id->id);
                if (count < 0)
-                       return EINVAL;
+                       return -EINVAL;
                if (count >= size)
                        return -ENOMEM;
                len += count;
@@ -2189,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device)
  ok:
        list_for_each_entry(child, &device->children, node)
                acpi_bus_attach(child);
+
+       if (device->handler && device->handler->hotplug.notify_online)
+               device->handler->hotplug.notify_online(device);
 }
 
 /**
index fcbda105616eb703b87e50ee88650cd4dde72d78..8e7e18567ae626fefd9039943662b84a627c8a58 100644 (file)
@@ -750,6 +750,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
                },
        },
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "ThinkPad X201s",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+               },
+       },
 
        /* The native backlight controls do not work on some older machines */
        {
index a29f8012fb08483cfb67e47b9d104db1e7db4954..a0cc0edafc78e27a9b1cee2031d1a74e523425d2 100644 (file)
@@ -305,6 +305,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -442,6 +450,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9182 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
@@ -1329,6 +1339,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
                ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
 
+       /*
+        * The JMicron 361/363 chips contain one SATA controller and one
+        * PATA controller. The two controllers must be powered on one
+        * after the other; otherwise one of them cannot be powered on
+        * successfully, so disable the async suspend method for these
+        * chips.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+               (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+               pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+               device_disable_async_suspend(&pdev->dev);
+
        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
index f1fef74e503cd94716dfe090ab397d83118065f5..032904402c9509af14eafdc36993e8d6eb39ac88 100644 (file)
  */
 
 #include <linux/ahci_platform.h>
-#include <linux/reset.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
+
 #include "ahci.h"
 
 #define SATA_CONFIGURATION_0                           0x180
@@ -180,9 +183,12 @@ static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
 
        /* Pad calibration */
 
-       /* FIXME Always use calibration 0. Change this to read the calibration
-        * fuse once the fuse driver has landed. */
-       val = 0;
+       ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
+       if (ret) {
+               dev_err(&tegra->pdev->dev,
+                       "failed to read calibration fuse: %d\n", ret);
+               return ret;
+       }
 
        calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
 
index c6962300b93c7c6292ed91d62611db6678af1f49..f03aab187f4da1cf16d3f2cb34642f4fcaad7434 100644 (file)
@@ -78,6 +78,9 @@
 #define CFG_MEM_RAM_SHUTDOWN           0x00000070
 #define BLOCK_MEM_RDY                  0x00000074
 
+/* Max retry for link down */
+#define MAX_LINK_DOWN_RETRY 3
+
 struct xgene_ahci_context {
        struct ahci_host_priv *hpriv;
        struct device *dev;
@@ -145,6 +148,14 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
        return rc;
 }
 
+static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
+{
+       void __iomem *diagcsr = ctx->csr_diag;
+
+       return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
+               readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
+}
+
 /**
  * xgene_ahci_read_id - Read ID data from the specified device
  * @dev: device
@@ -229,8 +240,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
  * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will
  * report disparity error and etc. In addition, during COMRESET, there can
  * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
- * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following
- * algorithm is followed to proper configure the hardware PHY during COMRESET:
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
+ * reboot cycle regression tests, the PHY sometimes reports link down even
+ * though the device is present because speed negotiation failed, so the
+ * COMRESET needs to be retried to bring the link up. The following algorithm
+ * is followed to properly configure the hardware PHY during COMRESET:
  *
  * Alg Part 1:
  * 1. Start the PHY at Gen3 speed (default setting)
@@ -246,9 +260,15 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
  * Alg Part 2:
  * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
  *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
- * 2. Go to Alg Part 3
+ * 2. Go to Alg Part 4
  *
  * Alg Part 3:
+ * 1. Check PORT_SCR_STAT: if device presence was detected but PHY
+ *    communication could not be established, and fewer than the maximum of
+ *    3 link-down retries have been attempted, go back to Alg Part 1.
+ * 2. Go to Alg Part 4.
+ *
+ * Alg Part 4:
  * 1. Clear any pending from register PORT_SCR_ERR.
  *
  * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition
@@ -267,19 +287,27 @@ static int xgene_ahci_do_hardreset(struct ata_link *link,
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_taskfile tf;
+       int link_down_retry = 0;
        int rc;
-       u32 val;
-
-       /* clear D2H reception area to properly wait for D2H FIS */
-       ata_tf_init(link->device, &tf);
-       tf.command = ATA_BUSY;
-       ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-       rc = sata_link_hardreset(link, timing, deadline, online,
+       u32 val, sstatus;
+
+       do {
+               /* clear D2H reception area to properly wait for D2H FIS */
+               ata_tf_init(link->device, &tf);
+               tf.command = ATA_BUSY;
+               ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+               rc = sata_link_hardreset(link, timing, deadline, online,
                                 ahci_check_ready);
+               if (*online) {
+                       val = readl(port_mmio + PORT_SCR_ERR);
+                       if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+                               dev_warn(ctx->dev, "link has error\n");
+                       break;
+               }
 
-       val = readl(port_mmio + PORT_SCR_ERR);
-       if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
-               dev_warn(ctx->dev, "link has error\n");
+               sata_scr_read(link, SCR_STATUS, &sstatus);
+       } while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
+                (sstatus & 0xff) == 0x1);
 
        /* clear all errors if any pending */
        val = readl(port_mmio + PORT_SCR_ERR);
@@ -467,6 +495,11 @@ static int xgene_ahci_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       if (xgene_ahci_is_memram_inited(ctx)) {
+               dev_info(dev, "skip clock and PHY initialization\n");
+               goto skip_clk_phy;
+       }
+
        /* Due to errata, HW requires full toggle transition */
        rc = ahci_platform_enable_clks(hpriv);
        if (rc)
@@ -479,7 +512,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
 
        /* Configure the host controller */
        xgene_ahci_hw_init(hpriv);
-
+skip_clk_phy:
        hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
 
        rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
index 893e30e9a9efab5662d5361e83051f9ac1c650c0..ffbe625e6fd2738726e77d42313aa9a09866ec4b 100644 (file)
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
        /* SATA Controller IDE (Coleto Creek) */
        { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 
        { }     /* terminate list */
 };
index 4d1a5d2c4287f2979c34a2913c0cb4018f573776..47e418b8c8baa0eee13b0a2843c25371a83bd338 100644 (file)
@@ -143,6 +143,18 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
        };
        const struct ata_port_info *ppi[] = { &info, NULL };
 
+       /*
+        * The JMicron 361/363 chips contain one SATA controller and one
+        * PATA controller. The two controllers must be powered on one
+        * after the other; otherwise one of them cannot be powered on
+        * successfully, so disable the async suspend method for these
+        * chips.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+               (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+               pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+               device_disable_async_suspend(&pdev->dev);
+
        return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
 }
 
index 65ea7b256b3eab603100bfe451383bb1fa339ae8..0c94b661c16f87da22f1ed90a0c1d3512bcaf69e 100644 (file)
@@ -512,7 +512,14 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
                            map, &regmap_reg_ranges_fops);
 
        if (map->max_register || regmap_readable(map, 0)) {
-               debugfs_create_file("registers", 0400, map->debugfs,
+               umode_t registers_mode;
+
+               if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
+                       registers_mode = 0600;
+               else
+                       registers_mode = 0400;
+
+               debugfs_create_file("registers", registers_mode, map->debugfs,
                                    map, &regmap_map_fops);
                debugfs_create_file("access", 0400, map->debugfs,
                                    map, &regmap_access_fops);
index 294a7dd2519028a21260579f6c25b5a9aff011f7..f032ed6dd459564ece3a6cf119070c72ca2991cc 100644 (file)
@@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xA8DB */
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
index db1e9560d8a721c5165955651462c11d5b306f0a..5c8e7fe077458f31e83b0bcf65137b255e4a8336 100644 (file)
@@ -3918,7 +3918,6 @@ skip_create_disk:
        if (rv) {
                dev_err(&dd->pdev->dev,
                        "Unable to allocate request queue\n");
-               rv = -ENOMEM;
                goto block_queue_alloc_init_error;
        }
 
index a3b042c4d448dcb4ad0a21272dd8ff83527f3b9d..00d469c7f9f79742c659011088b144d5bf4b6bef 100644 (file)
@@ -462,17 +462,21 @@ static int null_add_dev(void)
        struct gendisk *disk;
        struct nullb *nullb;
        sector_t size;
+       int rv;
 
        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
-       if (!nullb)
+       if (!nullb) {
+               rv = -ENOMEM;
                goto out;
+       }
 
        spin_lock_init(&nullb->lock);
 
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;
 
-       if (setup_queues(nullb))
+       rv = setup_queues(nullb);
+       if (rv)
                goto out_free_nullb;
 
        if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;
 
-               if (blk_mq_alloc_tag_set(&nullb->tag_set))
+               rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+               if (rv)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_tags;
+               }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_make_request(nullb->q, null_queue_bio);
                init_driver_queues(nullb);
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
        disk = nullb->disk = alloc_disk_node(1, home_node);
-       if (!disk)
+       if (!disk) {
+               rv = -ENOMEM;
                goto out_cleanup_blk_queue;
+       }
 
        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
 out_free_nullb:
        kfree(nullb);
 out:
-       return -ENOMEM;
+       return rv;
 }
 
 static int __init null_init(void)
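The null_blk hunks above replace the single hard-coded -ENOMEM at the out: label with an rv that records the actual error at each failure point. A compact sketch of that unwind pattern, assuming <linux/slab.h> and hypothetical example_dev/example_setup() names:

static int example_add_dev(void)
{
	struct example_dev *dev;	/* hypothetical device structure */
	int rv;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		rv = -ENOMEM;		/* pick the errno where the failure happens */
		goto out;
	}

	rv = example_setup(dev);	/* hypothetical; returns 0 or a negative errno */
	if (rv)
		goto out_free;		/* propagate the helper's errno unchanged */

	return 0;

out_free:
	kfree(dev);
out:
	return rv;			/* caller sees the real cause, not a blanket -ENOMEM */
}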
index 623c84145b792b9ddaa852e45c2cdbf80b1f08d5..4b97baf8afa317347ca7ff531f2d8828394084a2 100644 (file)
@@ -5087,9 +5087,11 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-       rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
-       if (!rbd_dev->rq_wq)
+       rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
+       if (!rbd_dev->rq_wq) {
+               ret = -ENOMEM;
                goto err_out_mapping;
+       }
 
        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
index 551e01061434e723bed922b45ac8ac96cf2b232b..95254585db86aca1adc29091a28486fe821e7e53 100644 (file)
@@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
 };
 
 static struct l3_masters_data omap_l3_masters[] = {
-       { 0x0 , "MPU"},
-       { 0x10, "CS_ADP"},
-       { 0x14, "xxx"},
-       { 0x20, "DSP"},
-       { 0x30, "IVAHD"},
-       { 0x40, "ISS"},
-       { 0x44, "DucatiM3"},
-       { 0x48, "FaceDetect"},
-       { 0x50, "SDMA_Rd"},
-       { 0x54, "SDMA_Wr"},
-       { 0x58, "xxx"},
-       { 0x5C, "xxx"},
-       { 0x60, "SGX"},
-       { 0x70, "DSS"},
-       { 0x80, "C2C"},
-       { 0x88, "xxx"},
-       { 0x8C, "xxx"},
-       { 0x90, "HSI"},
-       { 0xA0, "MMC1"},
-       { 0xA4, "MMC2"},
-       { 0xA8, "MMC6"},
-       { 0xB0, "UNIPRO1"},
-       { 0xC0, "USBHOSTHS"},
-       { 0xC4, "USBOTGHS"},
-       { 0xC8, "USBHOSTFS"}
+       { 0x00, "MPU"},
+       { 0x04, "CS_ADP"},
+       { 0x05, "xxx"},
+       { 0x08, "DSP"},
+       { 0x0C, "IVAHD"},
+       { 0x10, "ISS"},
+       { 0x11, "DucatiM3"},
+       { 0x12, "FaceDetect"},
+       { 0x14, "SDMA_Rd"},
+       { 0x15, "SDMA_Wr"},
+       { 0x16, "xxx"},
+       { 0x17, "xxx"},
+       { 0x18, "SGX"},
+       { 0x1C, "DSS"},
+       { 0x20, "C2C"},
+       { 0x22, "xxx"},
+       { 0x23, "xxx"},
+       { 0x24, "HSI"},
+       { 0x28, "MMC1"},
+       { 0x29, "MMC2"},
+       { 0x2A, "MMC6"},
+       { 0x2C, "UNIPRO1"},
+       { 0x30, "USBHOSTHS"},
+       { 0x31, "USBOTGHS"},
+       { 0x32, "USBHOSTFS"}
 };
 
 static struct l3_flagmux_data *omap_l3_flagmux[] = {
index 2e3139eda93b687568aeda1698c5022eb9144c64..132c9ccfdc62b382cb22a9870e44038c6495a39d 100644 (file)
@@ -36,6 +36,7 @@ struct virtrng_info {
        int index;
        bool busy;
        bool hwrng_register_done;
+       bool hwrng_removed;
 };
 
 
@@ -68,6 +69,9 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
        int ret;
        struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+       if (vi->hwrng_removed)
+               return -ENODEV;
+
        if (!vi->busy) {
                vi->busy = true;
                init_completion(&vi->have_data);
@@ -137,6 +141,9 @@ static void remove_common(struct virtio_device *vdev)
 {
        struct virtrng_info *vi = vdev->priv;
 
+       vi->hwrng_removed = true;
+       vi->data_avail = 0;
+       complete(&vi->have_data);
        vdev->config->reset(vdev);
        vi->busy = false;
        if (vi->hwrng_register_done)
index 0300c46ee247517ff9967e8ffbcbfecf9f14fac9..32f7c1b36204018d0ce151601c6ca5ef6f2cf75f 100644 (file)
@@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
        int i;
 
        num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
-       if (num_parents <= 0 || num_parents > 1)
+       if (num_parents != 2)
                return;
 
        for (i = 0; i < num_parents; ++i) {
index bac2ddf49d02ffe28529f9be43d7251fea805188..73a8d0ff530c022b3ccdbe0fc96fe3f4a1fc2f67 100644 (file)
@@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = {
        .clk_num = ARRAY_SIZE(clk),
 };
 
-static int __init efm32gg_cmu_init(struct device_node *np)
+static void __init efm32gg_cmu_init(struct device_node *np)
 {
        int i;
        void __iomem *base;
@@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        base = of_iomap(np, 0);
        if (!base) {
                pr_warn("Failed to map address range for efm32gg,cmu node\n");
-               return -EADDRNOTAVAIL;
+               return;
        }
 
        clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
@@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
                        "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
 
-       return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 }
 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
index b76fa69b44cb7eb625c65723862ebc37d963b494..bacc06ff939b090bffb9ef9759ac9b9058520a98 100644 (file)
@@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
 static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
+       struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
@@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk)
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       /*
+        * Use safe iteration, as change_rate can actually swap parents
+        * for certain clock types.
+        */
+       hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
                /* Skip children who will be reparented to another clock */
                if (child->new_parent && child->new_parent != clk)
                        continue;
index 4032e510d9aab0188548b967b20549cf8a0eb89f..3b83b7dd78c7297b612c2c3720e8c86568ce447e 100644 (file)
@@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = {
 };
 
 static const struct freq_tbl clk_tbl_sdc[] = {
-       {    144000, P_PXO,   5, 18,625 },
+       {    200000, P_PXO,   2, 2, 125 },
        {    400000, P_PLL8,  4, 1, 240 },
        {  16000000, P_PLL8,  4, 1,   6 },
        {  17070000, P_PLL8,  1, 2,  45 },
index 0d8c6c59a75e2b00ce952c63a61eac5658c2e613..b22a2d2f21e978366313513b6cbb6f6fd80e8a6e 100644 (file)
@@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS),
        GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
        GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
-       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
+       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
        GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
        GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
        GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
@@ -603,7 +603,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS),
        GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS),
        GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
        GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS),
        GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS),
        GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS),
index 4a65b410e4d5f6e0871f138c4778c195726da570..af29359677da3ad3e42d75e47bf0dfecc0621249 100644 (file)
@@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                            unsigned long parent_rate)
 {
-       struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+       struct dra7_atl_desc *cdesc;
        u32 divider;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       cdesc = to_atl_desc(hw);
        divider = ((parent_rate + rate / 2) / rate) - 1;
        if (divider > DRA7_ATL_DIVIDER_MASK)
                divider = DRA7_ATL_DIVIDER_MASK;
index e6aa10db7bba1f78116ff6cd0b02a49b65bea857..a837f703be658304b7d2a3a0e2743bd17d7077a7 100644 (file)
@@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
 static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long parent_rate)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
+       struct clk_divider *divider;
        unsigned int div, value;
        unsigned long flags = 0;
        u32 val;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       divider = to_clk_divider(hw);
+
        div = DIV_ROUND_UP(parent_rate, rate);
        value = _get_val(divider, div);
 
index d9fdeddcef96e6b29d5785cb2001e72b8f805248..61190f6b48299ae7f89dbf6933b5e3d2333d84c1 100644 (file)
@@ -1289,6 +1289,8 @@ err_get_freq:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       up_write(&policy->rwsem);
+
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
@@ -1657,7 +1659,7 @@ void cpufreq_suspend(void)
                return;
 
        if (!has_target())
-               return;
+               goto suspend;
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
@@ -1671,6 +1673,7 @@ void cpufreq_suspend(void)
                                policy);
        }
 
+suspend:
        cpufreq_suspended = true;
 }
 
@@ -1687,13 +1690,13 @@ void cpufreq_resume(void)
        if (!cpufreq_driver)
                return;
 
+       cpufreq_suspended = false;
+
        if (!has_target())
                return;
 
        pr_debug("%s: Resuming Governors\n", __func__);
 
-       cpufreq_suspended = false;
-
        list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
index f7a32d2326c6428f4f4e8b1000381b7dc4301e5f..773bcde893c0472b3ecf8f2e9e3a5356d532a402 100644 (file)
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
                goto out;
        }
 
-       freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
+       freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
        if (!freq_table) {
                ret = -ENOMEM;
                goto out;
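The fix above swaps the first two arguments so they match kcalloc()'s signature, kcalloc(n, size, flags): element count first, element size second. A minimal sketch of the corrected call pattern; the wrapper function is illustrative only:

#include <linux/cpufreq.h>
#include <linux/slab.h>

static struct cpufreq_frequency_table *example_alloc_freq_table(unsigned int max_opps)
{
	struct cpufreq_frequency_table *freq_table;

	/* kcalloc(n, size, flags) zeroes the allocation and checks n * size for overflow */
	freq_table = kcalloc(max_opps + 1, sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table)
		return NULL;

	return freq_table;
}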
index c1320528b9d0c0060247b4d025bc35116701996a..6bd69adc3c5e5dda77c1850bd94cf2da630a44f3 100644 (file)
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
        return cpufreq_register_driver(&integrator_driver);
 }
 
-static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
+static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
 {
-       cpufreq_unregister_driver(&integrator_driver);
+       return cpufreq_unregister_driver(&integrator_driver);
 }
 
 static const struct of_device_id integrator_cpufreq_match[] = {
index 728a2d8794993defdbe1e10c15fe7ee814c528bb..4d2c8e861089a45a9fe12e1106e08e0fd7482195 100644 (file)
@@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
        u32 input_buffer;
        int cpu;
 
-       spin_lock(&pcc_lock);
        cpu = policy->cpu;
        pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
 
@@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
        freqs.old = policy->cur;
        freqs.new = target_freq;
        cpufreq_freq_transition_begin(policy, &freqs);
+       spin_lock(&pcc_lock);
 
        input_buffer = 0x1 | (((target_freq * 100)
                               / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
index 20dc848481e703b0867e6e460ff35184c705bdc5..4d4e016d755b0737368e9acef3052dd694ecf291 100644 (file)
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
 {
        int ret;
 
+       ret = ccp_present();
+       if (ret)
+               return ret;
+
        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
index a7d110652a748e74ad6b562ad26fd4520426a34e..c6e6171eb6d31f764ab8e944174323f3a8ee5006 100644 (file)
@@ -54,6 +54,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
        ccp_dev = NULL;
 }
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+       if (ccp_get_device())
+               return 0;
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
index b707f292b377f1366962ac61c46dbd55be1ae384..65dd1ff93d3bf6b0ba5b0bf9cfb56fc5d15edcbc 100644 (file)
@@ -66,7 +66,7 @@
 #define ADF_DH895XCC_ETR_MAX_BANKS 32
 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
 #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
 #define ADF_DH895XCC_SMIA1_MASK 0x1
 /* Error detection and correction */
 #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
index 6a9d89c93b1fdd0f7dfb4f8a68dee807efc5f29a..ae2ab14e64b30e5edf9119569d379766eba24b94 100644 (file)
@@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
                        vchan_cyclic_callback(&chan->desc->vdesc);
                } else {
                        if (chan->next_sg == chan->desc->num_sgs) {
-                               chan->desc = NULL;
+                               list_del(&chan->desc->vdesc.node);
                                vchan_cookie_complete(&chan->desc->vdesc);
+                               chan->desc = NULL;
                        }
                }
        }
index 4cf7d9a950d71b2116ca96c032c6599b9dfdb1f2..bbea8243f9e806a0b0626da6c3336ef644a0dc25 100644 (file)
@@ -1017,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c)
                return -EINVAL;
 
        if (c->paused) {
+               mb();
+
+               /* Restore channel link register */
+               omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
+
                omap_dma_start(c, c->desc);
                c->paused = false;
        }
index d8be608a9f3be733bf4d56a1e360ecc3c5471e7b..aef6a95adef546d733d3bc6cefa32d4304b61520 100644 (file)
@@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE)           += efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)                        += cper.o
 obj-$(CONFIG_EFI_RUNTIME_MAP)          += runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)     += runtime-wrappers.o
-obj-$(CONFIG_EFI_STUB)                 += libstub/
+obj-$(CONFIG_EFI_ARM_STUB)             += libstub/
index a56bb3528755377b8fd6cb29dbd5fcb85692a55d..c846a9608cbd7bb23878954feefcee6eaddf5b88 100644 (file)
@@ -22,7 +22,7 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                        unsigned long map_size, unsigned long desc_size,
                        u32 desc_ver)
 {
-       int node, prev;
+       int node, prev, num_rsv;
        int status;
        u32 fdt_val32;
        u64 fdt_val64;
@@ -73,6 +73,14 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                prev = node;
        }
 
+       /*
+        * Delete all memory reserve map entries. When booting via UEFI,
+        * the kernel will use the UEFI memory map to find reserved regions.
+        */
+       num_rsv = fdt_num_mem_rsv(fdt);
+       while (num_rsv-- > 0)
+               fdt_del_mem_rsv(fdt, num_rsv);
+
        node = fdt_subnode_offset(fdt, 0, "chosen");
        if (node < 0) {
                node = fdt_add_subnode(fdt, 0, "chosen");
index d62eaaa753978598e4eb119f86179af857aaf0a7..687476fb39e311b0d09ac0a5b20458fb94e2c605 100644 (file)
@@ -377,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
        struct gpio_chip *chip = achip->chip;
        struct acpi_resource_gpio *agpio;
        struct acpi_resource *ares;
+       int pin_index = (int)address;
        acpi_status status;
        bool pull_up;
+       int length;
        int i;
 
        status = acpi_buffer_to_resource(achip->conn_info.connection,
@@ -400,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                return AE_BAD_PARAMETER;
        }
 
-       for (i = 0; i < agpio->pin_table_length; i++) {
+       length = min(agpio->pin_table_length, (u16)(pin_index + bits));
+       for (i = pin_index; i < length; ++i) {
                unsigned pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
index 15cc0bb65dda84f8e5b8151722434f6073e61e5b..c68d037de656d68b5808df8174de7bd57bac734e 100644 (file)
@@ -413,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
                return;
        }
 
-       irq_set_chained_handler(parent_irq, parent_handler);
        /*
         * The parent irqchip is already using the chip_data for this
         * irqchip, so our callbacks simply use the handler_data.
         */
        irq_set_handler_data(parent_irq, gpiochip);
+       irq_set_chained_handler(parent_irq, parent_handler);
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
 
@@ -1674,7 +1674,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
                set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
        /* No particular flag request, return here... */
-       if (flags & GPIOD_FLAGS_BIT_DIR_SET)
+       if (!(flags & GPIOD_FLAGS_BIT_DIR_SET))
                return desc;
 
        /* Process flags */
index a2cc6be97983fe327cd4f67a4c3204cd4985691c..b792194e0d9ceb6320457c99f270a67680a2d6d8 100644 (file)
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
        uint32_t data, jreg;
+       ast_open_key(ast);
 
        if (dev->pdev->device == PCI_CHIP_AST1180) {
                ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
                        }
                        ast->vga2_clone = false;
                } else {
-                       ast->chip = 2000;
+                       ast->chip = AST2000;
                        DRM_INFO("AST 2000 detected\n");
                }
        }
index 9d7346b92653436ab6e753a2802c4ad6652279aa..6b7efcf363d61df33e493326b54daa8ce6f36825 100644 (file)
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_VIRTUAL);
        drm_connector_helper_add(connector,
                                 &bochs_connector_connector_helper_funcs);
+       drm_connector_register(connector);
 }
 
 
index e1c5c322212901beb63cc8757305fc18efd9a4ef..c7c5a9d91fa0082b2967a55fb8614bcdb6f74a1d 100644 (file)
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
 
        drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
 
+       drm_connector_register(connector);
        return connector;
 }
 
index dea99d92fb4a195784c288f3fbc236c00a8188c2..4b7ed52892173ea8fb595d7350f42eec20234591 100644 (file)
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));
 
-       ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-       if (ret) {
-               DRM_ERROR("CMD: cmd_parser_init failed!\n");
-               fini_hash_table(ring);
-               return ret;
+       if (hash_empty(ring->cmd_hash)) {
+               ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+               if (ret) {
+                       DRM_ERROR("CMD: cmd_parser_init failed!\n");
+                       fini_hash_table(ring);
+                       return ret;
+               }
        }
 
        ring->needs_cmd_parser = true;
index 2e7f03ad5ee2e9e84e9de0588ce78d8ad67f86f2..9933c26017edae26a2bab5879e8be0543735c027 100644 (file)
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_power_domains_init_hw(dev_priv);
 
+       /*
+        * We enable some interrupt sources in our postinstall hooks, so mark
+        * interrupts as enabled _before_ actually enabling them to avoid
+        * special cases in our ordering checks.
+        */
+       dev_priv->pm._irqs_disabled = false;
+
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                goto cleanup_gem_stolen;
 
-       dev_priv->pm._irqs_disabled = false;
-
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
index 7a830eac5ba30a7859eb65313d9ee567c5d324eb..3524306d8cfbf7b9d7e93b2bd076774eb69b36bc 100644 (file)
@@ -184,6 +184,7 @@ enum hpd_pin {
                if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
        struct i915_gtt gtt; /* VM representing the global address space */
 
        struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-       DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+       DECLARE_HASHTABLE(mm_structs, 7);
+       struct mutex mm_lock;
 
        /* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
                        unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                       struct mm_struct *mm;
-                       struct i915_mmu_object *mn;
+                       struct i915_mm_struct *mm;
+                       struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;
        };
index ba7f5c6bb50d1f7e5b886ea20e74ed58a9ae5247..ad55b06a3cb1c69754fe60947a183e0af6604e46 100644 (file)
@@ -1590,10 +1590,13 @@ unlock:
 out:
        switch (ret) {
        case -EIO:
-               /* If this -EIO is due to a gpu hang, give the reset code a
-                * chance to clean up the mess. Otherwise return the proper
-                * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+               /*
+                * We eat errors when the gpu is terminally wedged to avoid
+                * userspace unduly crashing (gl has no provisions for mmaps to
+                * fail). But any other -EIO isn't ours (e.g. swap in failure)
+                * and so needs to be reported.
+                */
+               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
                        ret = VM_FAULT_SIGBUS;
                        break;
                }
index 1411613f2174cf67280e12127327e880e8772275..e42925f76b4bb807393040e60c809fa8bc84e096 100644 (file)
@@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
        POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
 }
 
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+               intel_gtt_chipset_flush();
+       } else {
+               I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+               POSTING_READ(GFX_FLSH_CNTL_GEN6);
+       }
+}
+
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
                                       dev_priv->gtt.base.start,
                                       dev_priv->gtt.base.total,
                                       true);
+
+       i915_ggtt_flush(dev_priv);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
        }
 
-       i915_gem_chipset_flush(dev);
+       i915_ggtt_flush(dev_priv);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
index fe69fc837d9ee25f33b3dbd15664c2c33b9ca721..d384139973790e09d2da3583861c7224b1596f44 100644 (file)
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+       struct mm_struct *mm;
+       struct drm_device *dev;
+       struct i915_mmu_notifier *mn;
+       struct hlist_node node;
+       struct kref kref;
+       struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
-       struct drm_device *dev;
-       struct mm_struct *mm;
-       struct work_struct work;
-       unsigned long count;
        unsigned long serial;
        bool has_linear;
 };
 
 struct i915_mmu_object {
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      unsigned long start,
                                      unsigned long end)
 {
-       struct i915_mmu_object *mmu;
+       struct i915_mmu_object *mo;
        unsigned long serial;
 
 restart:
        serial = mn->serial;
-       list_for_each_entry(mmu, &mn->linear, link) {
+       list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;
 
-               if (mmu->it.last < start || mmu->it.start > end)
+               if (mo->it.last < start || mo->it.start > end)
                        continue;
 
-               obj = mmu->obj;
+               obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
-
-       /* Protected by dev->struct_mutex */
-       hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-               if (mmu->mm == mm)
-                       return mmu;
-
-       return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        int ret;
 
-       lockdep_assert_held(&dev->struct_mutex);
-
-       mmu = __i915_mmu_notifier_lookup(dev, mm);
-       if (mmu)
-               return mmu;
-
-       mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-       if (mmu == NULL)
+       mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+       if (mn == NULL)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&mmu->lock);
-       mmu->dev = dev;
-       mmu->mn.ops = &i915_gem_userptr_notifier;
-       mmu->mm = mm;
-       mmu->objects = RB_ROOT;
-       mmu->count = 0;
-       mmu->serial = 1;
-       INIT_LIST_HEAD(&mmu->linear);
-       mmu->has_linear = false;
-
-       /* Protected by mmap_sem (write-lock) */
-       ret = __mmu_notifier_register(&mmu->mn, mm);
+       spin_lock_init(&mn->lock);
+       mn->mn.ops = &i915_gem_userptr_notifier;
+       mn->objects = RB_ROOT;
+       mn->serial = 1;
+       INIT_LIST_HEAD(&mn->linear);
+       mn->has_linear = false;
+
+        /* Protected by mmap_sem (write-lock) */
+       ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
-               kfree(mmu);
+               kfree(mn);
                return ERR_PTR(ret);
        }
 
-       /* Protected by dev->struct_mutex */
-       hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-       return mmu;
+       return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-       struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-       mmu_notifier_unregister(&mmu->mn, mmu->mm);
-       kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       /* Protected by dev->struct_mutex */
-       hash_del(&mmu->node);
-
-       /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-        * We enter the function holding struct_mutex, therefore we need
-        * to drop our mutex prior to calling mmu_notifier_unregister in
-        * order to prevent lock inversion (and system-wide deadlock)
-        * between the mmap_sem and struct-mutex. Hence we defer the
-        * unregistration to a workqueue where we hold no locks.
-        */
-       INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-       schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-       if (++mmu->serial == 0)
-               mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-       struct i915_mmu_object *mn;
-
-       list_for_each_entry(mn, &mmu->linear, link)
-               if (mn->is_linear)
-                       return true;
-
-       return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       spin_lock(&mmu->lock);
-       list_del(&mn->link);
-       if (mn->is_linear)
-               mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-       else
-               interval_tree_remove(&mn->it, &mmu->objects);
-       __i915_mmu_notifier_update_serial(mmu);
-       spin_unlock(&mmu->lock);
-
-       /* Protected against _add() by dev->struct_mutex */
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
+       if (++mn->serial == 0)
+               mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+                     struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
 {
        struct interval_tree_node *it;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(mmu->dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
-       i915_gem_retire_requests(mmu->dev);
+       i915_gem_retire_requests(dev);
 
-       spin_lock(&mmu->lock);
-       it = interval_tree_iter_first(&mmu->objects,
-                                     mn->it.start, mn->it.last);
+       spin_lock(&mn->lock);
+       it = interval_tree_iter_first(&mn->objects,
+                                     mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
-                       mmu->has_linear = mn->is_linear = true;
+                       mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
-               interval_tree_insert(&mn->it, &mmu->objects);
+               interval_tree_insert(&mo->it, &mn->objects);
 
        if (ret == 0) {
-               list_add(&mn->link, &mmu->linear);
-               __i915_mmu_notifier_update_serial(mmu);
+               list_add(&mo->link, &mn->linear);
+               __i915_mmu_notifier_update_serial(mn);
        }
-       spin_unlock(&mmu->lock);
-       mutex_unlock(&mmu->dev->struct_mutex);
+       spin_unlock(&mn->lock);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+       struct i915_mmu_object *mo;
+
+       list_for_each_entry(mo, &mn->linear, link)
+               if (mo->is_linear)
+                       return true;
+
+       return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
+{
+       spin_lock(&mn->lock);
+       list_del(&mo->link);
+       if (mo->is_linear)
+               mn->has_linear = i915_mmu_notifier_has_linear(mn);
+       else
+               interval_tree_remove(&mo->it, &mn->objects);
+       __i915_mmu_notifier_update_serial(mn);
+       spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-       struct i915_mmu_object *mn;
+       struct i915_mmu_object *mo;
 
-       mn = obj->userptr.mn;
-       if (mn == NULL)
+       mo = obj->userptr.mmu_object;
+       if (mo == NULL)
                return;
 
-       i915_mmu_notifier_del(mn->mmu, mn);
-       obj->userptr.mn = NULL;
+       i915_mmu_notifier_del(mo->mn, mo);
+       kfree(mo);
+
+       obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+       if (mm->mn == NULL) {
+               down_write(&mm->mm->mmap_sem);
+               mutex_lock(&to_i915(mm->dev)->mm_lock);
+               if (mm->mn == NULL)
+                       mm->mn = i915_mmu_notifier_create(mm->mm);
+               mutex_unlock(&to_i915(mm->dev)->mm_lock);
+               up_write(&mm->mm->mmap_sem);
+       }
+       return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
 {
-       struct i915_mmu_notifier *mmu;
-       struct i915_mmu_object *mn;
+       struct i915_mmu_notifier *mn;
+       struct i915_mmu_object *mo;
        int ret;
 
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-       down_write(&obj->userptr.mm->mmap_sem);
-       ret = i915_mutex_lock_interruptible(obj->base.dev);
-       if (ret == 0) {
-               mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-               if (!IS_ERR(mmu))
-                       mmu->count++; /* preemptive add to act as a refcount */
-               else
-                       ret = PTR_ERR(mmu);
-               mutex_unlock(&obj->base.dev->struct_mutex);
-       }
-       up_write(&obj->userptr.mm->mmap_sem);
-       if (ret)
-               return ret;
+       if (WARN_ON(obj->userptr.mm == NULL))
+               return -EINVAL;
 
-       mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-       if (mn == NULL) {
-               ret = -ENOMEM;
-               goto destroy_mmu;
-       }
+       mn = i915_mmu_notifier_find(obj->userptr.mm);
+       if (IS_ERR(mn))
+               return PTR_ERR(mn);
 
-       mn->mmu = mmu;
-       mn->it.start = obj->userptr.ptr;
-       mn->it.last = mn->it.start + obj->base.size - 1;
-       mn->obj = obj;
+       mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+       if (mo == NULL)
+               return -ENOMEM;
 
-       ret = i915_mmu_notifier_add(mmu, mn);
-       if (ret)
-               goto free_mn;
+       mo->mn = mn;
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = mo->it.start + obj->base.size - 1;
+       mo->obj = obj;
 
-       obj->userptr.mn = mn;
+       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+       if (ret) {
+               kfree(mo);
+               return ret;
+       }
+
+       obj->userptr.mmu_object = mo;
        return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+       if (mn == NULL)
+               return;
 
-free_mn:
+       mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
-destroy_mmu:
-       mutex_lock(&obj->base.dev->struct_mutex);
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
-       mutex_unlock(&obj->base.dev->struct_mutex);
-       return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
        return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+       struct i915_mm_struct *mm;
+
+       /* Protected by dev_priv->mm_lock */
+       hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+               if (mm->mm == real)
+                       return mm;
+
+       return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_mm_struct *mm;
+       int ret = 0;
+
+       /* During release of the GEM object we hold the struct_mutex. This
+        * precludes us from calling mmput() at that time as that may be
+        * the last reference and so call exit_mmap(). exit_mmap() will
+        * attempt to reap the vma, and if we were holding a GTT mmap
+        * would then call drm_gem_vm_close() and attempt to reacquire
+        * the struct mutex. So in order to avoid that recursion, we have
+        * to defer releasing the mm reference until after we drop the
+        * struct_mutex, i.e. we need to schedule a worker to do the clean
+        * up.
+        */
+       mutex_lock(&dev_priv->mm_lock);
+       mm = __i915_mm_struct_find(dev_priv, current->mm);
+       if (mm == NULL) {
+               mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+               if (mm == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               kref_init(&mm->kref);
+               mm->dev = obj->base.dev;
+
+               mm->mm = current->mm;
+               atomic_inc(&current->mm->mm_count);
+
+               mm->mn = NULL;
+
+               /* Protected by dev_priv->mm_lock */
+               hash_add(dev_priv->mm_structs,
+                        &mm->node, (unsigned long)mm->mm);
+       } else
+               kref_get(&mm->kref);
+
+       obj->userptr.mm = mm;
+out:
+       mutex_unlock(&dev_priv->mm_lock);
+       return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+       struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+       i915_mmu_notifier_free(mm->mn, mm->mm);
+       mmdrop(mm->mm);
+       kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+       struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+       /* Protected by dev_priv->mm_lock */
+       hash_del(&mm->node);
+       mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+       INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+       schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+       if (obj->userptr.mm == NULL)
+               return;
+
+       kref_put_mutex(&obj->userptr.mm->kref,
+                      __i915_mm_struct_free,
+                      &to_i915(obj->base.dev)->mm_lock);
+       obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
-               struct mm_struct *mm = obj->userptr.mm;
+               struct mm_struct *mm = obj->userptr.mm->mm;
 
                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
        pvec = NULL;
        pinned = 0;
-       if (obj->userptr.mm == current->mm) {
+       if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
        i915_gem_userptr_release__mmu_notifier(obj);
-
-       if (obj->userptr.mm) {
-               mmput(obj->userptr.mm);
-               obj->userptr.mm = NULL;
-       }
+       i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-       if (obj->userptr.mn)
+       if (obj->userptr.mmu_object)
                return 0;
 
        return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                return -ENODEV;
        }
 
-       /* Allocate the new object */
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
-       ret = -ENOMEM;
-       if ((obj->userptr.mm = get_task_mm(current)))
+       ret = i915_gem_userptr_init__mm_struct(obj);
+       if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
        struct drm_i915_private *dev_priv = to_i915(dev);
-       hash_init(dev_priv->mmu_notifiers);
-#endif
+       mutex_init(&dev_priv->mm_lock);
+       hash_init(dev_priv->mm_structs);
        return 0;
 }
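
The long comment in i915_gem_userptr_init__mm_struct() above carries the core of this refactor: per-mm entries are looked up in a hashtable keyed by the task's mm under dev_priv->mm_lock, the final kref_put must unhash the entry while that lock is still held, and the heavy teardown (mmu_notifier_unregister() plus mmdrop()) is pushed to a worker so it never runs under the locks held at release time. A minimal userspace analogue of that lifetime rule, with hypothetical names (mm_entry, entry_get, entry_put) and a plain function standing in for the workqueue, might look like this sketch:

/* userspace analogue only; not driver code */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mm_entry {
        int refcount;            /* stands in for struct kref       */
        int hashed;              /* stands in for hash_add/hash_del */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void deferred_free(struct mm_entry *e)
{
        /* In the driver this runs from a workqueue, after the lookup
         * lock has been dropped, so it may sleep or take other locks. */
        printf("deferred teardown of entry %p\n", (void *)e);
        free(e);
}

static struct mm_entry *entry_get(struct mm_entry *e)
{
        pthread_mutex_lock(&table_lock);
        e->refcount++;
        pthread_mutex_unlock(&table_lock);
        return e;
}

static void entry_put(struct mm_entry *e)
{
        /* Shape of kref_put_mutex(): take the lock when the count may
         * hit zero, unhash while still holding it, then hand the
         * object to the deferred path instead of freeing in place. */
        pthread_mutex_lock(&table_lock);
        if (--e->refcount == 0) {
                e->hashed = 0;               /* hash_del() under the lock      */
                pthread_mutex_unlock(&table_lock);
                deferred_free(e);            /* schedule_work() in the driver  */
                return;
        }
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        struct mm_entry *e = calloc(1, sizeof(*e));

        if (!e)
                return 1;
        e->refcount = 1;
        e->hashed = 1;

        entry_get(e);   /* second object sharing the same mm */
        entry_put(e);   /* count 2 -> 1, nothing freed        */
        entry_put(e);   /* count 1 -> 0, unhash + deferred free */
        return 0;
}

The point mirrored from the driver is that the release callback owns the lock when it runs and must drop it before the expensive cleanup starts.
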
index e4d7607da2c482ee6e31576e746de5ca71b6e28a..f29b44c86a2f7354c29dd0ce3ca3a5cfc7ae723c 100644 (file)
 #define GFX_OP_DESTBUFFER_INFO  ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD                  (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA    (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB      (1<<20)
+#define   BLT_WRITE_A                  (2<<20)
+#define   BLT_WRITE_RGB                        (1<<20)
+#define   BLT_WRITE_RGBA               (BLT_WRITE_RGB | BLT_WRITE_A)
 #define   BLT_DEPTH_8                  (0<<24)
 #define   BLT_DEPTH_16_565             (1<<24)
 #define   BLT_DEPTH_16_1555            (2<<24)
 #define   BLT_DEPTH_32                 (3<<24)
-#define   BLT_ROP_GXCOPY               (0xcc<<16)
+#define   BLT_ROP_SRC_COPY             (0xcc<<16)
+#define   BLT_ROP_COLOR_COPY           (0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED      (1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED      (1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
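
The new COLOR_BLT_CMD follows the same packing as the neighbouring blitter opcodes: bits 31:29 carry the command client, bits 28:22 the opcode, and the low bits the dword length minus two, hence the (5-2) for the five-dword colour blit. A tiny standalone check using the values from this hunk; the field split in the decode helper is an illustrative assumption, not a register definition:

#include <stdio.h>
#include <stdint.h>

#define COLOR_BLT_CMD       (2 << 29 | 0x40 << 22 | (5 - 2))
#define SRC_COPY_BLT_CMD    ((2 << 29) | (0x43 << 22) | 4)
#define BLT_WRITE_RGB       (1 << 20)
#define BLT_WRITE_A         (2 << 20)
#define BLT_WRITE_RGBA      (BLT_WRITE_RGB | BLT_WRITE_A)

static void decode(const char *name, uint32_t cmd)
{
        /* assumed split: 31:29 client, 28:22 opcode, low bits length-2 */
        printf("%-18s 0x%08x  client=%u opcode=0x%02x len_field=%u\n",
               name, cmd, cmd >> 29, (cmd >> 22) & 0x7f, cmd & 0x3f);
}

int main(void)
{
        decode("COLOR_BLT_CMD", COLOR_BLT_CMD);
        decode("SRC_COPY_BLT_CMD", SRC_COPY_BLT_CMD);
        decode("with RGBA write", SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
        return 0;
}
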
index 81d7681faa638f1c7879b5cd0aafb3f56a948adf..fdff1d420c14ab3536717607975c6cfa273fb474 100644 (file)
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
+       if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+           tmp & DP_COLOR_RANGE_16_235)
+               pipe_config->limited_color_range = true;
+
        pipe_config->has_dp_encoder = true;
 
        intel_dp_get_m_n(crtc, pipe_config);
index f9151f6641d9ba5b2ad6ebd341853378a2132fa2..5a9de21637b7819818e1993f911432e0fdb634de 100644 (file)
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_config *pipe_config)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp, flags = 0;
        int dotclock;
 
@@ -731,9 +732,13 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
 
-       if (tmp & HDMI_MODE_SELECT_HDMI)
+       if (tmp & SDVO_AUDIO_ENABLE)
                pipe_config->has_audio = true;
 
+       if (!HAS_PCH_SPLIT(dev) &&
+           tmp & HDMI_COLOR_RANGE_16_235)
+               pipe_config->limited_color_range = true;
+
        pipe_config->adjusted_mode.flags |= flags;
 
        if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
index ca52ad2ae7d12928b8304f54f9f06de8a00f8511..d8de1d5140a7ed32a06746fe7394ab359ca05f28 100644 (file)
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
        return -EINVAL;
 }
 
+/*
+ * If the vendor backlight interface is not in use and ACPI backlight interface
+ * is broken, do not bother processing backlight change requests from firmware.
+ */
+static bool should_ignore_backlight_request(void)
+{
+       return acpi_video_backlight_support() &&
+              !acpi_video_verify_backlight_support();
+}
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
-       /*
-        * If the acpi_video interface is not supposed to be used, don't
-        * bother processing backlight level change requests from firmware.
-        */
-       if (!acpi_video_verify_backlight_support()) {
+       if (should_ignore_backlight_request()) {
                DRM_DEBUG_KMS("opregion backlight request ignored\n");
                return 0;
        }
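
should_ignore_backlight_request() just combines two ACPI predicates, so its behaviour is easy to tabulate. The quick enumeration below uses plain booleans standing in for acpi_video_backlight_support() and acpi_video_verify_backlight_support(); the stubs are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool should_ignore(bool backlight_support, bool backlight_verified)
{
        /* Ignore firmware requests only when ACPI backlight control is
         * nominally present but known not to actually work. */
        return backlight_support && !backlight_verified;
}

int main(void)
{
        for (int support = 0; support <= 1; support++)
                for (int verified = 0; verified <= 1; verified++)
                        printf("support=%d verified=%d -> %s\n",
                               support, verified,
                               should_ignore(support, verified) ?
                               "ignore request" : "process request");
        return 0;
}
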
index 16371a444426d190d73b4baddd38cd8abeae66b2..47a126a0493f811cad30474532477cfe5994734e 100644 (file)
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                                u64 offset, u32 len,
                                unsigned flags)
 {
+       u32 cs_offset = ring->scratch.gtt_offset;
        int ret;
 
-       if (flags & I915_DISPATCH_PINNED) {
-               ret = intel_ring_begin(ring, 4);
-               if (ret)
-                       return ret;
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
 
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, offset + len - 8);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
-       } else {
-               u32 cs_offset = ring->scratch.gtt_offset;
+       /* Evict the invalid PTE TLBs */
+       intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+       intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+       intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+       intel_ring_emit(ring, cs_offset);
+       intel_ring_emit(ring, 0xdeadbeef);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
+       if ((flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
-               ret = intel_ring_begin(ring, 9+3);
+               ret = intel_ring_begin(ring, 6 + 2);
                if (ret)
                        return ret;
-               /* Blit the batch (which has now all relocs applied) to the stable batch
-                * scratch bo area (so that the CS never stumbles over its tlb
-                * invalidation bug) ... */
-               intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-                               XY_SRC_COPY_BLT_WRITE_ALPHA |
-                               XY_SRC_COPY_BLT_WRITE_RGB);
-               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-               intel_ring_emit(ring, 0);
-               intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+               /* Blit the batch (which has now all relocs applied) to the
+                * stable batch scratch bo area (so that the CS never
+                * stumbles over its tlb invalidation bug) ...
+                */
+               intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+               intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
                intel_ring_emit(ring, cs_offset);
-               intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
+
                intel_ring_emit(ring, MI_FLUSH);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
 
                /* ... and execute it. */
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, cs_offset + len - 8);
-               intel_ring_advance(ring);
+               offset = cs_offset;
        }
 
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+       intel_ring_emit(ring, offset + len - 8);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
        return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
        /* Workaround batchbuffer to combat CS tlb bug. */
        if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
                if (obj == NULL) {
                        DRM_ERROR("Failed to allocate batch bo\n");
                        return -ENOMEM;
index c69d3ce1b3d69a6f7e9c943ed5be3dc68decdde7..c14341ca3ef9386a0be1b432206f00c15ddae09b 100644 (file)
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+       intel_wait_for_vblank(encoder->base.dev,
+                             to_intel_crtc(encoder->base.crtc)->pipe);
+
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
index a125a7e32742c3707bb3e0446a4fdd8f64fffd3e..c6c9b02e0ada9f052249a556a9e2169aa967b91d 100644 (file)
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
        priv->hdmi_pdev = pdev;
 }
 
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+       int gpio = of_get_named_gpio(of_node, name, 0);
+       if (gpio < 0) {
+               char name2[32];
+               snprintf(name2, sizeof(name2), "%s-gpio", name);
+               gpio = of_get_named_gpio(of_node, name2, 0);
+               if (gpio < 0) {
+                       dev_err(dev, "failed to get gpio: %s (%d)\n",
+                                       name, gpio);
+                       gpio = -1;
+               }
+       }
+       return gpio;
+}
+#endif
+
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
        struct device_node *of_node = dev->of_node;
 
-       int get_gpio(const char *name)
-       {
-               int gpio = of_get_named_gpio(of_node, name, 0);
-               if (gpio < 0) {
-                       char name2[32];
-                       snprintf(name2, sizeof(name2), "%s-gpio", name);
-                       gpio = of_get_named_gpio(of_node, name2, 0);
-                       if (gpio < 0) {
-                               dev_err(dev, "failed to get gpio: %s (%d)\n",
-                                               name, gpio);
-                               gpio = -1;
-                       }
-               }
-               return gpio;
-       }
-
        if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
                static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
                static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        }
 
        config.mmio_name     = "core_physical";
-       config.ddc_clk_gpio  = get_gpio("qcom,hdmi-tx-ddc-clk");
-       config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
-       config.hpd_gpio      = get_gpio("qcom,hdmi-tx-hpd");
-       config.mux_en_gpio   = get_gpio("qcom,hdmi-tx-mux-en");
-       config.mux_sel_gpio  = get_gpio("qcom,hdmi-tx-mux-sel");
-       config.mux_lpm_gpio  = get_gpio("qcom,hdmi-tx-mux-lpm");
+       config.ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+       config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+       config.hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+       config.mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+       config.mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+       config.mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
 
 #else
        static const char *hpd_clk_names[] = {
index 902d7685d441f7dca81096626fc6f047f29ca218..f408b69486a8eb83d299abc636356187d2d467bb 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_COMMON_CLK
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#endif
 
 #include "hdmi.h"
 
 struct hdmi_phy_8960 {
        struct hdmi_phy base;
        struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
        struct clk_hw pll_hw;
        struct clk *pll;
        unsigned long pixclk;
+#endif
 };
 #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
 #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
 
 /*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
        .parent_names = hdmi_pll_parents,
        .num_parents = ARRAY_SIZE(hdmi_pll_parents),
 };
-
+#endif
 
 /*
  * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 {
        struct hdmi_phy_8960 *phy_8960;
        struct hdmi_phy *phy = NULL;
-       int ret, i;
+       int ret;
+#ifdef CONFIG_COMMON_CLK
+       int i;
 
        /* sanity check: */
        for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
                if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
                        return ERR_PTR(-EINVAL);
+#endif
 
        phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
        if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
        phy_8960->hdmi = hdmi;
 
+#ifdef CONFIG_COMMON_CLK
        phy_8960->pll_hw.init = &pll_init;
        phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
        if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
                phy_8960->pll = NULL;
                goto fail;
        }
+#endif
 
        return phy;
 
index 26ee80db17af9fec7308f2e4defbac06f6dd6d3d..fcf95680413d5da99c86fed5a44c9c6386ba3350 100644 (file)
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
-static char *vram;
+static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
 module_param(vram, charp, 0);
 
index 4b5bb5d58a548f0c0689448579753cbe4c980ec8..f8cbb512132fdcd224f6dda223689de4a4bb4350 100644 (file)
@@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
        const int   or = ffs(outp->or) - 1;
        const u32 loff = (or * 0x800) + (link * 0x80);
        const u16 mask = (outp->sorconf.link << 6) | outp->or;
+       struct dcb_output match;
        u8  ver, hdr;
 
-       if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+       if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
                nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
 }
 
index 0a44459844e30958e8710af20265889fc1fec5ab..05a278bab2477f0b1d12f25d73e3c47b1a054873 100644 (file)
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)
 
        nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
        nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-       nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
 
        nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
        if (priv->bar[0].mem)
index b19a2b3c10813fb9327e415f1985afc76d8c65ee..32f28dc73ef23465cb0cb83c43818bd2154809da 100644 (file)
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)
 
        if (priv->r100c10_page)
                nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+       nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
        return 0;
 }
 
index b54b582e72c4764e040769718d6dca566f1bb840..d5d65285efe59188072f90fc020939d14c3f1dec 100644 (file)
@@ -98,6 +98,7 @@ static int
 gf100_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -107,6 +108,7 @@ gf100_ltc_init(struct nouveau_object *object)
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
        nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
        nv_wr32(priv, 0x17e8d4, priv->tag_base);
+       nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
 
index ea716569745df755d28e4acaf0c624132e5a95c4..b39b5d0eb8f91f82180e53cc2a35707207ce0258 100644 (file)
@@ -28,6 +28,7 @@ static int
 gk104_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
        nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
        nv_wr32(priv, 0x17e000, priv->ltc_nr);
        nv_wr32(priv, 0x17e8d4, priv->tag_base);
+       nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
 
index 4761b2e9af0049f65c2a001ad3b96a4450396e4b..a4de64289762fc9cfe82ab40e424b4fba67fe0f0 100644 (file)
@@ -98,6 +98,7 @@ static int
 gm107_ltc_init(struct nouveau_object *object)
 {
        struct nvkm_ltc_priv *priv = (void *)object;
+       u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
 
        ret = nvkm_ltc_init(priv);
@@ -106,6 +107,7 @@ gm107_ltc_init(struct nouveau_object *object)
 
        nv_wr32(priv, 0x17e27c, priv->ltc_nr);
        nv_wr32(priv, 0x17e278, priv->tag_base);
+       nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
 }
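
All three LTC init hooks now copy the large-page setting chosen by the fb code (the "128KiB lpg" bit cleared in nvc0_fb_init above) into the LTC block. A toy version of that read-modify-write mirroring, with a two-entry array standing in for the priv registers and mask32() assumed to behave like nv_mask() (clear the masked bits, then OR in the value):

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2] = { 0x00000000, 0x00000002 }; /* [0]=0x100c80, [1]=LTC reg */

static uint32_t rd32(unsigned int idx)
{
        return regs[idx];
}

static void mask32(unsigned int idx, uint32_t mask, uint32_t value)
{
        /* read-modify-write: clear the masked bits, then set "value" */
        regs[idx] = (regs[idx] & ~mask) | (value & mask);
}

int main(void)
{
        /* fb init cleared bit0 of 0x100c80, i.e. 128KiB large pages are in use */
        uint32_t lpg128 = !(rd32(0) & 0x00000001);

        mask32(1, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        printf("lpg128=%u, LTC reg now 0x%08x\n", lpg128, regs[1]);
        return 0;
}
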
 
index 279206997e5cd7d091d44e26f84ce5efdfb06f1d..622424692b3bddea7fb37e728e32909865da17ee 100644 (file)
@@ -46,7 +46,6 @@ static struct nouveau_dsm_priv {
        bool dsm_detected;
        bool optimus_detected;
        acpi_handle dhandle;
-       acpi_handle other_handle;
        acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
@@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
        if (!dhandle)
                return false;
 
-       if (!acpi_has_method(dhandle, "_DSM")) {
-               nouveau_dsm_priv.other_handle = dhandle;
+       if (!acpi_has_method(dhandle, "_DSM"))
                return false;
-       }
+
        if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
                           1 << NOUVEAU_DSM_POWER))
                retval |= NOUVEAU_DSM_HAS_MUX;
@@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                        acpi_method_name);
                nouveau_dsm_priv.dsm_detected = true;
-               /*
-                * On some systems hotplug events are generated for the device
-                * being switched off when _DSM is executed.  They cause ACPI
-                * hotplug to trigger and attempt to remove the device from
-                * the system, which causes it to break down.  Prevent that from
-                * happening by setting the no_hotplug flag for the involved
-                * ACPI device objects.
-                */
-               acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
-               acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
                ret = true;
        }
 
index 99cd9e4a2aa6efde5c4c94154070cf593f552fb3..3440fc999f2f3290b3557ea495a26afb65fda874 100644 (file)
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        struct nouveau_software_chan *swch;
        struct nv_dma_v0 args = {};
        int ret, i;
+       bool save;
 
        nvif_object_map(chan->object);
 
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        }
 
        /* initialise synchronisation */
-       return nouveau_fence(chan->drm)->context_new(chan);
+       save = cli->base.super;
+       cli->base.super = true; /* hack until fencenv50 fixed */
+       ret = nouveau_fence(chan->drm)->context_new(chan);
+       cli->base.super = save;
+       return ret;
 }
 
 int
index 65b4fd53dd4e13186c2d33a072b380e2b8a1fdd1..4a21b2b06ce29beece2bcd102d30f005fcbc1307 100644 (file)
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev)
 }
 
 int
-nouveau_display_suspend(struct drm_device *dev)
+nouveau_display_suspend(struct drm_device *dev, bool runtime)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_crtc *crtc;
 
        nouveau_display_fini(dev);
 
-       NV_INFO(drm, "unpinning framebuffer(s)...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
 
@@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev)
 }
 
 void
-nouveau_display_repin(struct drm_device *dev)
+nouveau_display_resume(struct drm_device *dev, bool runtime)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_crtc *crtc;
-       int ret;
+       int ret, head;
 
+       /* re-pin fb/cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
 
@@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev)
                if (ret)
                        NV_ERROR(drm, "Could not pin/map cursor.\n");
        }
-}
-
-void
-nouveau_display_resume(struct drm_device *dev)
-{
-       struct drm_crtc *crtc;
-       int head;
 
        nouveau_display_init(dev);
 
@@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev)
        for (head = 0; head < dev->mode_config.num_crtc; head++)
                drm_vblank_on(dev, head);
 
+       /* This should ensure we don't hit a locking problem when someone
+        * wakes us up via a connector.  We should never go into suspend
+        * while the display is on anyway.
+        */
+       if (runtime)
+               return;
+
        drm_helper_resume_force_mode(dev);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
index 88ca177cb1c7e6653ad302bc2f91c9b2991b21cc..be3d5947c6be0a6a51fa1730d41501c49fb582c2 100644 (file)
@@ -63,9 +63,8 @@ int  nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int  nouveau_display_init(struct drm_device *dev);
 void nouveau_display_fini(struct drm_device *dev);
-int  nouveau_display_suspend(struct drm_device *dev);
-void nouveau_display_repin(struct drm_device *dev);
-void nouveau_display_resume(struct drm_device *dev);
+int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
+void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int  nouveau_display_vblank_enable(struct drm_device *, int);
 void nouveau_display_vblank_disable(struct drm_device *, int);
 int  nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
index 250a5e88c7516f78aba08cf39b6a0b7696072fb4..3ed32dd9030364cd2a366726f9e6af09b05f21e3 100644 (file)
@@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
        struct nouveau_cli *cli;
        int ret;
 
-       if (dev->mode_config.num_crtc && !runtime) {
+       if (dev->mode_config.num_crtc) {
+               NV_INFO(drm, "suspending console...\n");
+               nouveau_fbcon_set_suspend(dev, 1);
                NV_INFO(drm, "suspending display...\n");
-               ret = nouveau_display_suspend(dev);
+               ret = nouveau_display_suspend(dev, runtime);
                if (ret)
                        return ret;
        }
@@ -603,7 +605,7 @@ fail_client:
 fail_display:
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "resuming display...\n");
-               nouveau_display_resume(dev);
+               nouveau_display_resume(dev, runtime);
        }
        return ret;
 }
@@ -618,21 +620,19 @@ int nouveau_pmops_suspend(struct device *dev)
            drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
 
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 1);
-
        ret = nouveau_do_suspend(drm_dev, false);
        if (ret)
                return ret;
 
        pci_save_state(pdev);
        pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
 }
 
 static int
-nouveau_do_resume(struct drm_device *dev)
+nouveau_do_resume(struct drm_device *dev, bool runtime)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli;
@@ -657,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev)
 
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "resuming display...\n");
-               nouveau_display_repin(dev);
+               nouveau_display_resume(dev, runtime);
+               NV_INFO(drm, "resuming console...\n");
+               nouveau_fbcon_set_suspend(dev, 0);
        }
 
        return 0;
@@ -680,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev)
                return ret;
        pci_set_master(pdev);
 
-       ret = nouveau_do_resume(drm_dev);
-       if (ret)
-               return ret;
-
-       if (drm_dev->mode_config.num_crtc) {
-               nouveau_display_resume(drm_dev);
-               nouveau_fbcon_set_suspend(drm_dev, 0);
-       }
-
-       return 0;
+       return nouveau_do_resume(drm_dev, false);
 }
 
 static int nouveau_pmops_freeze(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       int ret;
-
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 1);
-
-       ret = nouveau_do_suspend(drm_dev, false);
-       return ret;
+       return nouveau_do_suspend(drm_dev, false);
 }
 
 static int nouveau_pmops_thaw(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       int ret;
-
-       ret = nouveau_do_resume(drm_dev);
-       if (ret)
-               return ret;
-
-       if (drm_dev->mode_config.num_crtc) {
-               nouveau_display_resume(drm_dev);
-               nouveau_fbcon_set_suspend(drm_dev, 0);
-       }
-
-       return 0;
+       return nouveau_do_resume(drm_dev, false);
 }
 
 
@@ -976,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
                return ret;
        pci_set_master(pdev);
 
-       ret = nouveau_do_resume(drm_dev);
+       ret = nouveau_do_resume(drm_dev, true);
        drm_kms_helper_poll_enable(drm_dev);
        /* do magic */
        nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
index 8bdd27091db8fd1aa7a34a3174b625c6a6d2a830..49fe6075cc7c50c79d47d2ac24c273e536ac3159 100644 (file)
@@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
        .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+       struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
+       console_lock();
+       nouveau_fbcon_accel_restore(fbcon->dev);
+       nouveau_fbcon_zfill(fbcon->dev, fbcon);
+       fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
+       console_unlock();
+}
 
 int
 nouveau_fbcon_init(struct drm_device *dev)
@@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev)
        if (!fbcon)
                return -ENOMEM;
 
+       INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
        fbcon->dev = dev;
        drm->fbcon = fbcon;
 
@@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        if (drm->fbcon) {
-               console_lock();
-               if (state == 0) {
-                       nouveau_fbcon_accel_restore(dev);
-                       nouveau_fbcon_zfill(dev, drm->fbcon);
+               if (state == FBINFO_STATE_RUNNING) {
+                       schedule_work(&drm->fbcon->work);
+                       return;
                }
+               flush_work(&drm->fbcon->work);
+               console_lock();
                fb_set_suspend(drm->fbcon->helper.fbdev, state);
-               if (state == 1)
-                       nouveau_fbcon_accel_save_disable(dev);
+               nouveau_fbcon_accel_save_disable(dev);
                console_unlock();
        }
 }
index 34658cfa8f5d4219c6bbc86bc04abf165271e9bd..0b465c7d3907cb1a07667b667032dbe3b052c71b 100644 (file)
@@ -36,6 +36,7 @@ struct nouveau_fbdev {
        struct nouveau_framebuffer nouveau_fb;
        struct list_head fbdev_list;
        struct drm_device *dev;
+       struct work_struct work;
        unsigned int saved_flags;
        struct nvif_object surf2d;
        struct nvif_object clip;
index 18d55d4472483ea5ef43c72087807eb0971ec9b4..c7592ec8ecb8e430a49e836e05d64914dd2693ac 100644 (file)
@@ -108,7 +108,16 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
+       bool runtime = false;
+
+       if (nouveau_runtime_pm == 1)
+               runtime = true;
+       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+               runtime = true;
+
        vga_switcheroo_unregister_client(dev->pdev);
+       if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+               vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 }
 
index b1e11f8434e28badd30572219b1d095a6a06adc4..ac14b67621d36f7a068de85103e06ba644915ea3 100644 (file)
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
        u8 msg[DP_DPCD_SIZE];
        int ret;
 
-       char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
        ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
                               DP_DPCD_SIZE);
        if (ret > 0) {
                memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-               hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
-                                  32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-               DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                             dig_connector->dpcd);
 
                radeon_dp_probe_oui(radeon_connector);
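
The replacement format here is the kernel's %*ph printk extension, which prints a small buffer as space-separated hex bytes and removes the need for the local hex_dump_to_buffer() staging buffer. A userspace approximation of the resulting output, with DP_DPCD_SIZE assumed to be 15 and the sample bytes invented:

#include <stdio.h>
#include <stdint.h>

#define DP_DPCD_SIZE 15        /* assumption for the demo */

static void print_hex_bytes(const uint8_t *buf, int len)
{
        for (int i = 0; i < len; i++)
                printf("%02x%s", buf[i], i + 1 < len ? " " : "\n");
}

int main(void)
{
        uint8_t dpcd[DP_DPCD_SIZE] = { 0x12, 0x14, 0xc4, 0x81, 0x01 };

        printf("DPCD: ");
        print_hex_bytes(dpcd, DP_DPCD_SIZE);
        return 0;
}
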
 
index fa9565957f9d4516ab801e645b5ac2153db535ad..3d546c606b43b6fd366a1257c751b16b5aaa2df5 100644 (file)
@@ -4803,7 +4803,7 @@ struct bonaire_mqd
  */
 static int cik_cp_compute_resume(struct radeon_device *rdev)
 {
-       int r, i, idx;
+       int r, i, j, idx;
        u32 tmp;
        bool use_doorbell = true;
        u64 hqd_gpu_addr;
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                mqd->queue_state.cp_hqd_pq_wptr= 0;
                if (RREG32(CP_HQD_ACTIVE) & 1) {
                        WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
-                       for (i = 0; i < rdev->usec_timeout; i++) {
+                       for (j = 0; j < rdev->usec_timeout; j++) {
                                if (!(RREG32(CP_HQD_ACTIVE) & 1))
                                        break;
                                udelay(1);
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happens, start parsing interrupts
                 * from the last non-overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catch up.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
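
The overflow branch now clears RB_OVERFLOW before anything uses wptr, and the warning masks (wptr + 16) with ptr_mask instead of adding the mask to it, so the message finally reports a real ring offset. A small arithmetic demo with invented values; the ring size and the position of the overflow flag are placeholders, not the radeon register layout:

#include <stdio.h>
#include <stdint.h>

#define RB_OVERFLOW (1u << 0)            /* placeholder flag bit for the demo */

int main(void)
{
        uint32_t ptr_mask = 0x3fff;          /* 16KiB ring - 1 (hypothetical)   */
        uint32_t wptr = RB_OVERFLOW | 0x20;  /* hardware reported an overflow   */

        uint32_t old_report = (wptr + 16) + ptr_mask;   /* old, meaningless     */
        uint32_t new_wptr = wptr & ~RB_OVERFLOW;        /* clear the flag first */
        uint32_t new_report = (new_wptr + 16) & ptr_mask;

        printf("old message value: 0x%08x\n", old_report);
        printf("new restart rptr : 0x%08x\n", new_report);
        return 0;
}
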
@@ -8251,6 +8251,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -8259,7 +8260,6 @@ restart_ih:
        if (queue_thermal)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 192278bc993ce581e42b9eb2e47a207533a45cd5..c4ffa54b1e3df503dc5dd9645765b2f0927e87b2 100644 (file)
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 {
        int r;
 
-       /* Reset dma */
-       WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-       RREG32(SRBM_SOFT_RESET);
-
        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;
index dbca60c7d097664e381d9bd5c6539d140aa72c16..e50807c29f696a6b8eb23cf1e5dd9ab7f58412ca 100644 (file)
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happens, start parsing interrupts
                 * from the last non-overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catch up.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -5137,6 +5137,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5146,6 @@ restart_ih:
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 8b58e11b64fa5a13611941b3aca6b04d2c1600a0..67cb472d188ce05ec36c5a812ae5751fbb618254 100644 (file)
@@ -33,6 +33,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK         800
 #define SMC_RAM_END                     0x40000
 
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                           bool enable);
 static void kv_init_graphics_levels(struct radeon_device *rdev);
 static int kv_calculate_ds_divider(struct radeon_device *rdev);
 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
 {
        kv_smc_bapm_enable(rdev, false);
 
+       if (rdev->family == CHIP_MULLINS)
+               kv_enable_nb_dpm(rdev, false);
+
        /* powerup blocks */
        kv_dpm_powergate_acp(rdev, false);
        kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
        return ret;
 }
 
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                           bool enable)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret = 0;
 
-       if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
-               ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
-               if (ret == 0)
-                       pi->nb_dpm_enabled = true;
+       if (enable) {
+               if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+                       ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+                       if (ret == 0)
+                               pi->nb_dpm_enabled = true;
+               }
+       } else {
+               if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+                       ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+                       if (ret == 0)
+                               pi->nb_dpm_enabled = false;
+               }
        }
 
        return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                        }
                        kv_update_sclk_t(rdev);
                        if (rdev->family == CHIP_MULLINS)
-                               kv_enable_nb_dpm(rdev);
+                               kv_enable_nb_dpm(rdev, true);
                }
        } else {
                if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                        }
                        kv_update_acp_boot_level(rdev);
                        kv_update_sclk_t(rdev);
-                       kv_enable_nb_dpm(rdev);
+                       kv_enable_nb_dpm(rdev, true);
                }
        }
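
kv_enable_nb_dpm() now takes a direction and keeps using pi->nb_dpm_enabled to avoid redundant SMU messages, which is what lets kv_dpm_disable() turn NB DPM back off on Mullins above. The same enable/disable-with-cached-state shape, reduced to a standalone sketch with a stub in place of kv_notify_message_to_smu():

#include <stdbool.h>
#include <stdio.h>

struct kv_state {
        bool enable_nb_dpm;   /* feature allowed at all      */
        bool nb_dpm_enabled;  /* current firmware-side state */
};

static int notify_smu(const char *msg)
{
        printf("SMU message: %s\n", msg);
        return 0;                      /* pretend the firmware accepted it */
}

static int nb_dpm_set(struct kv_state *pi, bool enable)
{
        int ret = 0;

        if (enable) {
                if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
                        ret = notify_smu("NBDPM_Enable");
                        if (ret == 0)
                                pi->nb_dpm_enabled = true;
                }
        } else {
                if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
                        ret = notify_smu("NBDPM_Disable");
                        if (ret == 0)
                                pi->nb_dpm_enabled = false;
                }
        }
        return ret;
}

int main(void)
{
        struct kv_state pi = { .enable_nb_dpm = true, .nb_dpm_enabled = false };

        nb_dpm_set(&pi, true);   /* sends NBDPM_Enable     */
        nb_dpm_set(&pi, true);   /* no-op, already enabled */
        nb_dpm_set(&pi, false);  /* sends NBDPM_Disable    */
        return 0;
}
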
 
index 8a3e6221cece2c9eb09ca94a2d8b749dd6ea60e1..f26f0a9fb522ae4671740a7decd4ec18455656b0 100644 (file)
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
        u32 reg_offset, wb_offset;
        int i, r;
 
-       /* Reset dma */
-       WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-
        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
index 4c5ec44ff328b3c63203e4c7c92f5283126e65e9..b0098e792e62337a6b4d511ca62baa7f37bc4eba 100644 (file)
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+                               RADEON_HDP_READ_BUFFER_INVALIDATE);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Who ever call radeon_fence_emit should call ring_lock and ask
  * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
        (void)RREG32(RADEON_CP_RB_WPTR);
 }
 
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-                               RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
index e616eb5f6e7a4076979650147acad69b9b40d567..ea5c9af722ef9f5d322d61a9d0ff628f8daa151f 100644 (file)
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-       /* PFP_SYNC_ME packet only exists on 7xx+ */
-       if (emit_wait && (rdev->family >= CHIP_RV770)) {
+       /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+       if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
                /* Prevent the PFP from running ahead of the semaphore wait */
                radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                radeon_ring_write(ring, 0x0);
@@ -3792,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happens, start parsing interrupts
                 * from the last non-overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catch up.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -4048,6 +4048,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -4056,7 +4057,6 @@ restart_ih:
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 51fd98553eaf6c4a34e73c5baeecf95b94220e29..a908daa006d23980bd51fe50d93e94ea3464aac5 100644 (file)
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
        u32 rb_bufsz;
        int r;
 
-       /* Reset dma */
-       if (rdev->family >= CHIP_RV770)
-               WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-       else
-               WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-       RREG32(SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(SRBM_SOFT_RESET, 0);
-
        WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
index 0c4a7d8d93e0465e9f2d094fe127f283602b3239..31e1052ad3e3780cec522ab8a2e04bb62698c573 100644 (file)
 #define R6XX_MAX_PIPES                         8
 #define R6XX_MAX_PIPES_MASK                    0xff
 
-/* PTE flags */
-#define PTE_VALID                              (1 << 0)
-#define PTE_SYSTEM                             (1 << 1)
-#define PTE_SNOOPED                            (1 << 2)
-#define PTE_READABLE                           (1 << 5)
-#define PTE_WRITEABLE                          (1 << 6)
-
 /* tiling bits */
 #define     ARRAY_LINEAR_GENERAL              0x00000000
 #define     ARRAY_LINEAR_ALIGNED              0x00000001
index 5f05b4c8433807bf26a1b0ef80fd31a2c25d9a36..3247bfd144106d54fa24161f898f24ae538f9a3c 100644 (file)
@@ -106,6 +106,7 @@ extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
 extern int radeon_bapm;
+extern int radeon_backlight;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
index eeeeabe09758ddccf2175ad48d9b9bd62e896e83..2dd5847f9b98e8a428e63b9ff9563f52b94d2709 100644 (file)
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r300_asic = {
index 275a5dc01780f7bedbed2cb3486c4c3d383710eb..7756bc1e1cd3ef088ac4bb96fa04a9ab632ad088 100644 (file)
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-                        struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
index 92b2d8dd47355f7f7b601025082868d3e29e60be..e74c7e387ddebfbb74d32866b39d2046d2632540 100644 (file)
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
+       /* Fujitsu D3003-S2 board lists the same DVI-I connector as both DVI-I and VGA */
+       if ((dev->pdev->device == 0x9805) &&
+           (dev->pdev->subsystem_vendor == 0x1734) &&
+           (dev->pdev->subsystem_device == 0x11bd)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+                       return false;
+       }
 
        return true;
 }
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
-               } else if ((controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
-                       DRM_INFO("Special thermal controller config\n");
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+                       DRM_INFO("External GPIO thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+                       DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+                       DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
                        rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
                        if (rdev->pm.i2c_bus) {
index a9fb0d016d387683a1d4bb96f77c046b7cd804b7..8bc7d0bbd3c80a94529cea79bf4f078d7951a87a 100644 (file)
@@ -33,7 +33,6 @@ static struct radeon_atpx_priv {
        bool atpx_detected;
        /* handle for device - and atpx */
        acpi_handle dhandle;
-       acpi_handle other_handle;
        struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
@@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
                return false;
 
        status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
-       if (ACPI_FAILURE(status)) {
-               radeon_atpx_priv.other_handle = dhandle;
+       if (ACPI_FAILURE(status))
                return false;
-       }
+
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx.handle = atpx_handle;
        return true;
@@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
-               /*
-                * On some systems hotplug events are generated for the device
-                * being switched off when ATPX is executed.  They cause ACPI
-                * hotplug to trigger and attempt to remove the device from
-                * the system, which causes it to break down.  Prevent that from
-                * happening by setting the no_hotplug flag for the involved
-                * ACPI device objects.
-                */
-               acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
-               acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
                return true;
        }
        return false;
index 6a219bcee66d2d8a5d7b8bf49f2344584a0a36fd..12c8329644c4af4e8fe5224fbbad5d888d580f28 100644 (file)
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
         */
        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+       /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+        * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+        */
+       { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        /* macbook pro 8.2 */
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
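
The quirk table is matched on the full (vendor, device, subsystem vendor, subsystem device) tuple and terminated by an all-zero sentinel entry. A standalone lookup over the same three entries shown here; the two RADEON_PX_QUIRK_* flag values are assumptions for the demo, not the driver's definitions:

#include <stdio.h>
#include <stdint.h>

#define PCI_VENDOR_ID_ATI            0x1002
#define PCI_VENDOR_ID_APPLE          0x106b
#define RADEON_PX_QUIRK_DISABLE_PX   (1 << 0)   /* assumed bit value */
#define RADEON_PX_QUIRK_LONG_WAKEUP  (1 << 1)   /* assumed bit value */

struct px_quirk {
        uint32_t vendor, device, subsys_vendor, subsys_device, flags;
};

static const struct px_quirk quirks[] = {
        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
        { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
};

static uint32_t px_quirk_flags(uint32_t ven, uint32_t dev,
                               uint32_t sven, uint32_t sdev)
{
        for (const struct px_quirk *q = quirks; q->vendor; q++)
                if (q->vendor == ven && q->device == dev &&
                    q->subsys_vendor == sven && q->subsys_device == sdev)
                        return q->flags;
        return 0;
}

int main(void)
{
        printf("Asus K53TK flags   : 0x%x\n",
               px_quirk_flags(PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122));
        printf("unknown board flags: 0x%x\n",
               px_quirk_flags(PCI_VENDOR_ID_ATI, 0x1234, 0x1043, 0x0001));
        return 0;
}
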
@@ -1393,7 +1397,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
        r = radeon_init(rdev);
        if (r)
-               return r;
+               goto failed;
 
        r = radeon_ib_ring_tests(rdev);
        if (r)
@@ -1413,7 +1417,7 @@ int radeon_device_init(struct radeon_device *rdev,
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
-                       return r;
+                       goto failed;
        }
 
        if ((radeon_testing & 1)) {
@@ -1435,6 +1439,11 @@ int radeon_device_init(struct radeon_device *rdev,
                        DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
        }
        return 0;
+
+failed:
+       if (runtime)
+               vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+       return r;
 }
 
 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1455,6 +1464,8 @@ void radeon_device_fini(struct radeon_device *rdev)
        radeon_bo_evict_vram(rdev);
        radeon_fini(rdev);
        vga_switcheroo_unregister_client(rdev->pdev);
+       if (rdev->flags & RADEON_IS_PX)
+               vga_switcheroo_fini_domain_pm_ops(rdev->dev);
        vga_client_register(rdev->pdev, NULL, NULL, NULL);
        if (rdev->rio_mem)
                pci_iounmap(rdev->pdev, rdev->rio_mem);
index 8df888908833f5d1a3e8d9f745edb5fe9b6143ea..f9d17b29b343443de2f986b6008a9991085ff518 100644 (file)
@@ -83,7 +83,7 @@
  *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  *   2.39.0 - Add INFO query for number of active CUs
  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- *            CS to GPU
+ *            CS to GPU on >= r600
  */
 #define KMS_DRIVER_MAJOR       2
 #define KMS_DRIVER_MINOR       40
@@ -181,6 +181,7 @@ int radeon_vm_block_size = -1;
 int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
 int radeon_bapm = -1;
+int radeon_backlight = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -263,6 +264,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
 MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(bapm, radeon_bapm, int, 0444);
 
+MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(backlight, radeon_backlight, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
@@ -440,6 +444,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        ret = radeon_suspend_kms(drm_dev, false, false);
        pci_save_state(pdev);
        pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3cold);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
index 3c2094c25b537516a8b181e26fd2bca00dca0411..15edf23b465c2ba90cca2f2ffb7c242971bda2bf 100644 (file)
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
        return ret;
 }
 
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+                                        struct drm_connector *connector)
+{
+       struct drm_device *dev = radeon_encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       bool use_bl = false;
+
+       if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+               return;
+
+       if (radeon_backlight == 0) {
+               return;
+       } else if (radeon_backlight == 1) {
+               use_bl = true;
+       } else if (radeon_backlight == -1) {
+               /* Quirks */
+               /* Amilo Xi 2550 only works with acpi bl */
+               if ((rdev->pdev->device == 0x9583) &&
+                   (rdev->pdev->subsystem_vendor == 0x1734) &&
+                   (rdev->pdev->subsystem_device == 0x1107))
+                       use_bl = false;
+               else
+                       use_bl = true;
+       }
+
+       if (use_bl) {
+               if (rdev->is_atom_bios)
+                       radeon_atom_backlight_init(radeon_encoder, connector);
+               else
+                       radeon_legacy_backlight_init(radeon_encoder, connector);
+               rdev->mode_info.bl_encoder = radeon_encoder;
+       }
+}
+
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
-       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
                        radeon_encoder = to_radeon_encoder(encoder);
                        if (radeon_encoder->devices & radeon_connector->devices) {
                                drm_mode_connector_attach_encoder(connector, encoder);
-                               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                                       if (rdev->is_atom_bios)
-                                               radeon_atom_backlight_init(radeon_encoder, connector);
-                                       else
-                                               radeon_legacy_backlight_init(radeon_encoder, connector);
-                                       rdev->mode_info.bl_encoder = radeon_encoder;
-                               }
+                               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                                       radeon_encoder_add_backlight(radeon_encoder, connector);
                        }
                }
        }
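
Editor's note: the new radeon.backlight option follows the same tri-state convention as the other radeon parameters (1 = force native control, 0 = disable, -1 = auto with per-board quirks). A minimal, self-contained sketch of that decision flow; the quirk IDs are the ones visible in the hunk above, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct pci_ids { unsigned short device, subven, subdev; };

/* Amilo Xi 2550: only the ACPI backlight interface works (per the hunk above). */
static const struct pci_ids acpi_only_quirk = { 0x9583, 0x1734, 0x1107 };

static bool want_native_backlight(int param, struct pci_ids id)
{
	if (param == 0)                 /* explicitly disabled */
		return false;
	if (param == 1)                 /* explicitly enabled */
		return true;
	/* param == -1: auto -- honour quirks, default to native control */
	if (id.device == acpi_only_quirk.device &&
	    id.subven == acpi_only_quirk.subven &&
	    id.subdev == acpi_only_quirk.subdev)
		return false;
	return true;
}

int main(void)
{
	struct pci_ids amilo = { 0x9583, 0x1734, 0x1107 };
	printf("auto on Amilo Xi 2550: %d\n", want_native_backlight(-1, amilo));
	return 0;
}
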
index 56d9fd66d8aed379c417d1ba4a47de7aa65ba472..abd6753a570afa119ac1e68255231c99f7b27995 100644 (file)
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
                            struct radeon_semaphore **semaphore)
 {
-       uint32_t *cpu_addr;
+       uint64_t *cpu_addr;
        int i, r;
 
        *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
index 6c1fc339d228142fe09de798a62ad4df8880959b..c5799f16aa4b2f27157b3946ec4c7b65ac85011f 100644 (file)
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
        if (flags & RADEON_GART_PAGE_READ)
-               addr |= RS400_PTE_READABLE;
+               entry |= RS400_PTE_READABLE;
        if (flags & RADEON_GART_PAGE_WRITE)
-               addr |= RS400_PTE_WRITEABLE;
+               entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
        entry = cpu_to_le32(entry);
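
Editor's note: the hunk above fixes a case where the permission flags were ORed into the already-consumed bus address instead of the page-table entry being built. A standalone sketch of the packing, with illustrative flag bit positions (the real RS400_PTE_* values live in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK      (~0xfffu)        /* 4 KiB pages */
/* Illustrative bit positions only. */
#define PTE_READABLE   (1u << 0)
#define PTE_WRITEABLE  (1u << 1)

/* Pack a 40-bit GART bus address plus permission flags into one 32-bit PTE. */
static uint32_t rs400_pte_sketch(uint64_t addr, int readable, int writeable)
{
	uint32_t entry = ((uint32_t)addr & PAGE_MASK) |
			 (((uint32_t)(addr >> 32) & 0xff) << 4);

	/* The fix above: flags must land in the entry itself;
	 * ORing them into addr after it was consumed was a no-op. */
	if (readable)
		entry |= PTE_READABLE;
	if (writeable)
		entry |= PTE_WRITEABLE;
	return entry;
}

int main(void)
{
	printf("pte = 0x%08x\n", rs400_pte_sketch(0x1234567000ULL, 1, 1));
	return 0;
}
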
index 6bce40847753b822f50be6037654c863833af068..3a0b973e8a96ef9f9b9aefb6e39a0459fd03ceff 100644 (file)
@@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
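
Editor's note: the reordering above matters because the overflow status bit must be stripped from wptr before it is used arithmetically; otherwise the stray bit corrupts the resume pointer and the warning text. A small worked example with an illustrative status-bit position and ring size:

#include <stdint.h>
#include <stdio.h>

#define RING_BYTES    0x10000u              /* example IH ring size */
#define PTR_MASK      (RING_BYTES - 1)
#define RB_OVERFLOW   (1u << 31)            /* illustrative status-bit position */

/* Compute the resume read pointer the way the fixed code does:
 * strip the status bit first, then continue 16 bytes past the write pointer. */
static uint32_t resume_rptr(uint32_t wptr_reg)
{
	uint32_t wptr = wptr_reg & ~RB_OVERFLOW;
	return (wptr + 16) & PTR_MASK;
}

int main(void)
{
	uint32_t reg = RB_OVERFLOW | 0xFFF0;    /* overflow flagged near end of ring */
	printf("resume at 0x%04x\n", resume_rptr(reg));  /* wraps cleanly to 0x0000 */
	return 0;
}
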
@@ -6664,13 +6664,13 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index ef93156a69c6fdad9dd2af368ef68e8ecd9a9b9a..b22968c08d1fb7babe739939992a060eb9756cd3 100644 (file)
@@ -298,7 +298,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
        hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
 
        val = frame[0xC];
-       val |= frame[0xD] << 8;
        hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
 
        /* Enable transmission slot for AVI infoframe
index 6866448083b2c1d6ad99686c2854db9ad94a5bb7..37ac7b5dbd066025c90009d6b44a1c08fffcc800 100644 (file)
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
 
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+       dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
index d2077f040f3ec87f0922b8eb67db413539851e53..77711623b9739526f8605fef680120424e4f9a59 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/poll.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/screen_info.h>
 
 #include <linux/uaccess.h>
 
@@ -112,10 +113,8 @@ both:
        return 1;
 }
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 /* this is only used a cookie - it should not be dereferenced */
 static struct pci_dev *vga_default;
-#endif
 
 static void vga_arb_device_card_gone(struct pci_dev *pdev);
 
@@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
 }
 
 /* Returns the default VGA device (vgacon's babe) */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 struct pci_dev *vga_default_device(void)
 {
        return vga_default;
@@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev)
        pci_dev_put(vga_default);
        vga_default = pci_dev_get(pdev);
 }
-#endif
 
 static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
 {
@@ -583,11 +580,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
        /* Deal with VGA default device. Use first enabled one
         * by default if arch doesn't have it's own hook
         */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
        if (vga_default == NULL &&
-           ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+           ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
+               pr_info("vgaarb: setting as boot device: PCI:%s\n",
+                       pci_name(pdev));
                vga_set_default_device(pdev);
-#endif
+       }
 
        vga_arbiter_check_bridge_sharing(vgadev);
 
@@ -621,10 +619,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
                goto bail;
        }
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
        if (vga_default == pdev)
                vga_set_default_device(NULL);
-#endif
 
        if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
                vga_decode_count--;
@@ -1320,6 +1316,38 @@ static int __init vga_arb_device_init(void)
        pr_info("vgaarb: loaded\n");
 
        list_for_each_entry(vgadev, &vga_list, list) {
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+               /* Override I/O based detection done by vga_arbiter_add_pci_device()
+                * as it may take the wrong device (e.g. on Apple system under EFI).
+                *
+                * Select the device owning the boot framebuffer if there is one.
+                */
+               resource_size_t start, end;
+               int i;
+
+               /* Does firmware framebuffer belong to us? */
+               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                       if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+                               continue;
+
+                       start = pci_resource_start(vgadev->pdev, i);
+                       end  = pci_resource_end(vgadev->pdev, i);
+
+                       if (!start || !end)
+                               continue;
+
+                       if (screen_info.lfb_base < start ||
+                           (screen_info.lfb_base + screen_info.lfb_size) >= end)
+                               continue;
+                       if (!vga_default_device())
+                               pr_info("vgaarb: setting as boot device: PCI:%s\n",
+                                       pci_name(vgadev->pdev));
+                       else if (vgadev->pdev != vga_default_device())
+                               pr_info("vgaarb: overriding boot device: PCI:%s\n",
+                                       pci_name(vgadev->pdev));
+                       vga_set_default_device(vgadev->pdev);
+               }
+#endif
                if (vgadev->bridge_has_one_vga)
                        pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
                else
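
Editor's note: the boot-device override above picks the GPU whose memory BAR contains the firmware framebuffer. The same containment test, written out as a standalone predicate (addresses in main() are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The firmware framebuffer [lfb_base, lfb_base + lfb_size) must sit inside one
 * of the device's memory BARs for that device to be chosen as boot VGA. */
static bool fb_inside_bar(uint64_t lfb_base, uint64_t lfb_size,
			  uint64_t bar_start, uint64_t bar_end)
{
	if (!bar_start || !bar_end)
		return false;
	return lfb_base >= bar_start && lfb_base + lfb_size < bar_end;
}

int main(void)
{
	/* e.g. an EFI framebuffer at 0xD0000000, 8 MiB, inside a 256 MiB BAR */
	printf("%d\n", fb_inside_bar(0xD0000000ULL, 8 << 20,
				     0xD0000000ULL, 0xDFFFFFFFULL));
	return 0;
}
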
index 4a7cbfad1d74a7355786c081cca1d4d956083044..fcdbde4ec692f25c11f0a44b281a3770ede2d0c0 100644 (file)
@@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev,
 }
 static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
 
+static umode_t fam15h_power_is_visible(struct kobject *kobj,
+                                      struct attribute *attr,
+                                      int index)
+{
+       /* power1_input is only reported for Fam15h, Models 00h-0fh */
+       if (attr == &dev_attr_power1_input.attr &&
+          (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf))
+               return 0;
+
+       return attr->mode;
+}
+
 static struct attribute *fam15h_power_attrs[] = {
        &dev_attr_power1_input.attr,
        &dev_attr_power1_crit.attr,
        NULL
 };
 
-ATTRIBUTE_GROUPS(fam15h_power);
+static const struct attribute_group fam15h_power_group = {
+       .attrs = fam15h_power_attrs,
+       .is_visible = fam15h_power_is_visible,
+};
+__ATTRIBUTE_GROUPS(fam15h_power);
 
 static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
 {
@@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev,
 
 static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
index e42964f07f67ddcb3cba6b8d091bb22d61488609..ad571ec795a3354112c3592ecebef641d15b6186 100644 (file)
@@ -145,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client,
        }
 
        i2c_set_clientdata(client, regmap);
-       hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                      regmap, tmp103_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
 }
index e0228b228256a8771df7b0be2536c3c42f90448d..1722f50f247385e0247c98e5dee91c97f18203ee 100644 (file)
@@ -2,11 +2,8 @@
 # Makefile for the i2c core.
 #
 
-i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_ACPI)         += i2c-acpi.o
-
 obj-$(CONFIG_I2C_BOARDINFO)    += i2c-boardinfo.o
-obj-$(CONFIG_I2C)              += i2ccore.o
+obj-$(CONFIG_I2C)              += i2c-core.o
 obj-$(CONFIG_I2C_SMBUS)                += i2c-smbus.o
 obj-$(CONFIG_I2C_CHARDEV)      += i2c-dev.o
 obj-$(CONFIG_I2C_MUX)          += i2c-mux.o
index 984492553e95a693500c75a161d3e4fe5d48b580..d9ee43c80cde8c6fd4163a0ffde8d8b2116172b0 100644 (file)
@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
                        desc->wr_len_cmd = dma_size;
                        desc->control |= ISMT_DESC_BLK;
                        priv->dma_buffer[0] = command;
-                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
                } else {
                        /* Block Read */
                        dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA:  READ\n");
@@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
                        desc->wr_len_cmd = dma_size;
                        desc->control |= ISMT_DESC_I2C;
                        priv->dma_buffer[0] = command;
-                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
                } else {
                        /* i2c Block Read */
                        dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA:  READ\n");
index 7170fc8928293a1cfccdb0704c274d4705b4b1de..65a21fed08b5223dbccbe76296082e98314505ba 100644 (file)
@@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
                ret = mxs_i2c_pio_wait_xfer_end(i2c);
                if (ret) {
                        dev_err(i2c->dev,
-                               "PIO: Failed to send SELECT command!\n");
+                               "PIO: Failed to send READ command!\n");
                        goto cleanup;
                }
 
index 3a4d64e1dfb16fe78013954f4441a93edb00c605..092d89bd32241b1e651414ee3c452ce248f36ec2 100644 (file)
@@ -674,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev)
        qup->adap.dev.of_node = pdev->dev.of_node;
        strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
 
-       ret = i2c_add_adapter(&qup->adap);
-       if (ret)
-               goto fail;
-
        pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(qup->dev);
        pm_runtime_set_active(qup->dev);
        pm_runtime_enable(qup->dev);
+
+       ret = i2c_add_adapter(&qup->adap);
+       if (ret)
+               goto fail_runtime;
+
        return 0;
 
+fail_runtime:
+       pm_runtime_disable(qup->dev);
+       pm_runtime_set_suspended(qup->dev);
 fail:
        qup_i2c_disable_clocks(qup);
        return ret;
index 1cc146cfc1f3dce02d4d83c7e79b698f07b161c9..e506fcd3ca04350416dce7a640f2483a97d5d34d 100644 (file)
@@ -76,8 +76,8 @@
 #define RCAR_IRQ_RECV  (MNR | MAL | MST | MAT | MDR)
 #define RCAR_IRQ_STOP  (MST)
 
-#define RCAR_IRQ_ACK_SEND      (~(MAT | MDE))
-#define RCAR_IRQ_ACK_RECV      (~(MAT | MDR))
+#define RCAR_IRQ_ACK_SEND      (~(MAT | MDE) & 0xFF)
+#define RCAR_IRQ_ACK_RECV      (~(MAT | MDR) & 0xFF)
 
 #define ID_LAST_MSG    (1 << 0)
 #define ID_IOERROR     (1 << 1)
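
Editor's note: the "& 0xFF" added above exists because ~(MAT | MDE) is evaluated in int width, so every bit above the status field gets set as well; the mask confines the ACK value to the low byte so the write cannot touch higher bits. A quick demonstration with illustrative bit positions (the real ones are in the driver):

#include <stdio.h>

#define MAT (1 << 2)   /* illustrative positions for the status bits */
#define MDE (1 << 1)

int main(void)
{
	unsigned int raw     = ~(MAT | MDE);
	unsigned int clamped = ~(MAT | MDE) & 0xFF;

	printf("raw     = 0x%08x\n", raw);      /* 0xfffffff9 */
	printf("clamped = 0x%08x\n", clamped);  /* 0x000000f9 */
	return 0;
}
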
index e637c32ae5172bcaa77bdd2c5eda8dee34457176..b38b0529946a1ca1c0e463f4a25e5cbbf021f329 100644 (file)
@@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
        for (i = 0; i < 8; ++i) {
                val = 0;
                for (j = 0; j < 4; ++j) {
-                       if (i2c->processed == i2c->msg->len)
+                       if ((i2c->processed == i2c->msg->len) && (cnt != 0))
                                break;
 
                        if (i2c->processed == 0 && cnt == 0)
@@ -433,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
        unsigned long i2c_rate = clk_get_rate(i2c->clk);
        unsigned int div;
 
-       /* SCL rate = (clk rate) / (8 * DIV) */
-       div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
-
-       /* The lower and upper half of the CLKDIV reg describe the length of
-        * SCL low & high periods. */
-       div = DIV_ROUND_UP(div, 2);
+       /* set DIV = DIVH = DIVL
+        * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1))
+        *          = (clk rate) / (16 * (DIV + 1))
+        */
+       div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
 
        i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
 }
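
Editor's note: with DIVH = DIVL = DIV, the comment above gives SCL = clk / (16 * (DIV + 1)), so rounding the division up and subtracting one guarantees the bus never runs faster than requested. A worked example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* DIV = ceil(clk / (16 * scl)) - 1, which never overshoots the target rate. */
static unsigned int rk3x_div(unsigned long clk, unsigned long scl)
{
	return DIV_ROUND_UP(clk, scl * 16) - 1;
}

int main(void)
{
	unsigned long clk = 100000000, scl = 400000;   /* 100 MHz input, 400 kHz target */
	unsigned int div = rk3x_div(clk, scl);

	printf("DIV = %u, actual SCL = %lu Hz\n", div, clk / (16 * (div + 1UL)));
	/* DIV = 15, actual SCL = 390625 Hz (just under the 400 kHz target) */
	return 0;
}
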
index 87d0371cebb71c0e9e6bb348878e0ea871d3a519..efba1ebe16ba1d4b46156a7d0b39a2714c195c66 100644 (file)
@@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
 {
        int ret;
        if (!i2c_dev->hw->has_single_clk_source) {
-               ret = clk_prepare_enable(i2c_dev->fast_clk);
+               ret = clk_enable(i2c_dev->fast_clk);
                if (ret < 0) {
                        dev_err(i2c_dev->dev,
                                "Enabling fast clk failed, err %d\n", ret);
                        return ret;
                }
        }
-       ret = clk_prepare_enable(i2c_dev->div_clk);
+       ret = clk_enable(i2c_dev->div_clk);
        if (ret < 0) {
                dev_err(i2c_dev->dev,
                        "Enabling div clk failed, err %d\n", ret);
-               clk_disable_unprepare(i2c_dev->fast_clk);
+               clk_disable(i2c_dev->fast_clk);
        }
        return ret;
 }
 
 static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
 {
-       clk_disable_unprepare(i2c_dev->div_clk);
+       clk_disable(i2c_dev->div_clk);
        if (!i2c_dev->hw->has_single_clk_source)
-               clk_disable_unprepare(i2c_dev->fast_clk);
+               clk_disable(i2c_dev->fast_clk);
 }
 
 static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
 {
        u32 val;
        int err = 0;
-       int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
        u32 clk_divisor;
 
        err = tegra_i2c_clock_enable(i2c_dev);
@@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
        i2c_writel(i2c_dev, val, I2C_CNFG);
        i2c_writel(i2c_dev, 0, I2C_INT_MASK);
 
-       clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
-       clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier);
-
        /* Make sure clock divisor programmed correctly */
        clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
        clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
@@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        void __iomem *base;
        int irq;
        int ret = 0;
+       int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
@@ -777,17 +774,39 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, i2c_dev);
 
+       if (!i2c_dev->hw->has_single_clk_source) {
+               ret = clk_prepare(i2c_dev->fast_clk);
+               if (ret < 0) {
+                       dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+                       return ret;
+               }
+       }
+
+       clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+       ret = clk_set_rate(i2c_dev->div_clk,
+                          i2c_dev->bus_clk_rate * clk_multiplier);
+       if (ret) {
+               dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret);
+               goto unprepare_fast_clk;
+       }
+
+       ret = clk_prepare(i2c_dev->div_clk);
+       if (ret < 0) {
+               dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+               goto unprepare_fast_clk;
+       }
+
        ret = tegra_i2c_init(i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize i2c controller");
-               return ret;
+               goto unprepare_div_clk;
        }
 
        ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
                        tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
-               return ret;
+               goto unprepare_div_clk;
        }
 
        i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
@@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
        if (ret) {
                dev_err(&pdev->dev, "Failed to add I2C adapter\n");
-               return ret;
+               goto unprepare_div_clk;
        }
 
        return 0;
+
+unprepare_div_clk:
+       clk_unprepare(i2c_dev->div_clk);
+
+unprepare_fast_clk:
+       if (!i2c_dev->hw->has_single_clk_source)
+               clk_unprepare(i2c_dev->fast_clk);
+
+       return ret;
 }
 
 static int tegra_i2c_remove(struct platform_device *pdev)
 {
        struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
        i2c_del_adapter(&i2c_dev->adapter);
+
+       clk_unprepare(i2c_dev->div_clk);
+       if (!i2c_dev->hw->has_single_clk_source)
+               clk_unprepare(i2c_dev->fast_clk);
+
        return 0;
 }
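
Editor's note: the Tegra change above splits clk_prepare_enable() into a one-time prepare at probe and a cheap enable/disable on the transfer path, since the prepare half may sleep. A toy model of that split, under the assumption that prepare() stands for the slow sleepable part and enable() for the fast part:

#include <stdbool.h>
#include <stdio.h>

struct clk_model {
	bool prepared;
	int  enable_count;
};

static int clk_model_prepare(struct clk_model *c) { c->prepared = true; return 0; }
static int clk_model_enable(struct clk_model *c)
{
	if (!c->prepared)
		return -1;              /* enabling an unprepared clock is a bug */
	c->enable_count++;
	return 0;
}
static void clk_model_disable(struct clk_model *c) { c->enable_count--; }

int main(void)
{
	struct clk_model div_clk = { 0 };

	clk_model_prepare(&div_clk);            /* probe time, may sleep */
	for (int i = 0; i < 3; i++) {           /* per-transfer hot path */
		clk_model_enable(&div_clk);
		clk_model_disable(&div_clk);
	}
	printf("prepared=%d, enables balanced=%d\n",
	       div_clk.prepared, div_clk.enable_count == 0);
	return 0;
}
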
 
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c
deleted file mode 100644 (file)
index 0dbc18c..0000000
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * I2C ACPI code
- *
- * Copyright (C) 2014 Intel Corp
- *
- * Author: Lan Tianyu <tianyu.lan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- */
-#define pr_fmt(fmt) "I2C/ACPI : " fmt
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-
-struct acpi_i2c_handler_data {
-       struct acpi_connection_info info;
-       struct i2c_adapter *adapter;
-};
-
-struct gsb_buffer {
-       u8      status;
-       u8      len;
-       union {
-               u16     wdata;
-               u8      bdata;
-               u8      data[0];
-       };
-} __packed;
-
-static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
-{
-       struct i2c_board_info *info = data;
-
-       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-               struct acpi_resource_i2c_serialbus *sb;
-
-               sb = &ares->data.i2c_serial_bus;
-               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
-                       info->addr = sb->slave_address;
-                       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
-                               info->flags |= I2C_CLIENT_TEN;
-               }
-       } else if (info->irq < 0) {
-               struct resource r;
-
-               if (acpi_dev_resource_interrupt(ares, 0, &r))
-                       info->irq = r.start;
-       }
-
-       /* Tell the ACPI core to skip this resource */
-       return 1;
-}
-
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
-                                      void *data, void **return_value)
-{
-       struct i2c_adapter *adapter = data;
-       struct list_head resource_list;
-       struct i2c_board_info info;
-       struct acpi_device *adev;
-       int ret;
-
-       if (acpi_bus_get_device(handle, &adev))
-               return AE_OK;
-       if (acpi_bus_get_status(adev) || !adev->status.present)
-               return AE_OK;
-
-       memset(&info, 0, sizeof(info));
-       info.acpi_node.companion = adev;
-       info.irq = -1;
-
-       INIT_LIST_HEAD(&resource_list);
-       ret = acpi_dev_get_resources(adev, &resource_list,
-                                    acpi_i2c_add_resource, &info);
-       acpi_dev_free_resource_list(&resource_list);
-
-       if (ret < 0 || !info.addr)
-               return AE_OK;
-
-       adev->power.flags.ignore_parent = true;
-       strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
-       if (!i2c_new_device(adapter, &info)) {
-               adev->power.flags.ignore_parent = false;
-               dev_err(&adapter->dev,
-                       "failed to add I2C device %s from ACPI\n",
-                       dev_name(&adev->dev));
-       }
-
-       return AE_OK;
-}
-
-/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
- * @adap: pointer to adapter
- *
- * Enumerate all I2C slave devices behind this adapter by walking the ACPI
- * namespace. When a device is found it will be added to the Linux device
- * model and bound to the corresponding ACPI handle.
- */
-void acpi_i2c_register_devices(struct i2c_adapter *adap)
-{
-       acpi_handle handle;
-       acpi_status status;
-
-       if (!adap->dev.parent)
-               return;
-
-       handle = ACPI_HANDLE(adap->dev.parent);
-       if (!handle)
-               return;
-
-       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-                                    acpi_i2c_add_device, NULL,
-                                    adap, NULL);
-       if (ACPI_FAILURE(status))
-               dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
-}
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
-               u8 cmd, u8 *data, u8 data_len)
-{
-
-       struct i2c_msg msgs[2];
-       int ret;
-       u8 *buffer;
-
-       buffer = kzalloc(data_len, GFP_KERNEL);
-       if (!buffer)
-               return AE_NO_MEMORY;
-
-       msgs[0].addr = client->addr;
-       msgs[0].flags = client->flags;
-       msgs[0].len = 1;
-       msgs[0].buf = &cmd;
-
-       msgs[1].addr = client->addr;
-       msgs[1].flags = client->flags | I2C_M_RD;
-       msgs[1].len = data_len;
-       msgs[1].buf = buffer;
-
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               dev_err(&client->adapter->dev, "i2c read failed\n");
-       else
-               memcpy(data, buffer, data_len);
-
-       kfree(buffer);
-       return ret;
-}
-
-static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
-               u8 cmd, u8 *data, u8 data_len)
-{
-
-       struct i2c_msg msgs[1];
-       u8 *buffer;
-       int ret = AE_OK;
-
-       buffer = kzalloc(data_len + 1, GFP_KERNEL);
-       if (!buffer)
-               return AE_NO_MEMORY;
-
-       buffer[0] = cmd;
-       memcpy(buffer + 1, data, data_len);
-
-       msgs[0].addr = client->addr;
-       msgs[0].flags = client->flags;
-       msgs[0].len = data_len + 1;
-       msgs[0].buf = buffer;
-
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               dev_err(&client->adapter->dev, "i2c write failed\n");
-
-       kfree(buffer);
-       return ret;
-}
-
-static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
-                       u32 bits, u64 *value64,
-                       void *handler_context, void *region_context)
-{
-       struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
-       struct acpi_i2c_handler_data *data = handler_context;
-       struct acpi_connection_info *info = &data->info;
-       struct acpi_resource_i2c_serialbus *sb;
-       struct i2c_adapter *adapter = data->adapter;
-       struct i2c_client client;
-       struct acpi_resource *ares;
-       u32 accessor_type = function >> 16;
-       u8 action = function & ACPI_IO_MASK;
-       acpi_status ret = AE_OK;
-       int status;
-
-       ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
-       if (ACPI_FAILURE(ret))
-               return ret;
-
-       if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       sb = &ares->data.i2c_serial_bus;
-       if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       memset(&client, 0, sizeof(client));
-       client.adapter = adapter;
-       client.addr = sb->slave_address;
-       client.flags = 0;
-
-       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
-               client.flags |= I2C_CLIENT_TEN;
-
-       switch (accessor_type) {
-       case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_byte(&client);
-                       if (status >= 0) {
-                               gsb->bdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_byte(&client, gsb->bdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_BYTE:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_byte_data(&client, command);
-                       if (status >= 0) {
-                               gsb->bdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_byte_data(&client, command,
-                                       gsb->bdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_WORD:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_word_data(&client, command);
-                       if (status >= 0) {
-                               gsb->wdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_word_data(&client, command,
-                                       gsb->wdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_block_data(&client, command,
-                                       gsb->data);
-                       if (status >= 0) {
-                               gsb->len = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_block_data(&client, command,
-                                       gsb->len, gsb->data);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
-               if (action == ACPI_READ) {
-                       status = acpi_gsb_i2c_read_bytes(&client, command,
-                                       gsb->data, info->access_length);
-                       if (status > 0)
-                               status = 0;
-               } else {
-                       status = acpi_gsb_i2c_write_bytes(&client, command,
-                                       gsb->data, info->access_length);
-               }
-               break;
-
-       default:
-               pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       gsb->status = status;
-
- err:
-       ACPI_FREE(ares);
-       return ret;
-}
-
-
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{
-       acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
-       struct acpi_i2c_handler_data *data;
-       acpi_status status;
-
-       if (!handle)
-               return -ENODEV;
-
-       data = kzalloc(sizeof(struct acpi_i2c_handler_data),
-                           GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       data->adapter = adapter;
-       status = acpi_bus_attach_private_data(handle, (void *)data);
-       if (ACPI_FAILURE(status)) {
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       status = acpi_install_address_space_handler(handle,
-                               ACPI_ADR_SPACE_GSBUS,
-                               &acpi_i2c_space_handler,
-                               NULL,
-                               data);
-       if (ACPI_FAILURE(status)) {
-               dev_err(&adapter->dev, "Error installing i2c space handler\n");
-               acpi_bus_detach_private_data(handle);
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{
-       acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
-       struct acpi_i2c_handler_data *data;
-       acpi_status status;
-
-       if (!handle)
-               return;
-
-       acpi_remove_address_space_handler(handle,
-                               ACPI_ADR_SPACE_GSBUS,
-                               &acpi_i2c_space_handler);
-
-       status = acpi_bus_get_private_data(handle, (void **)&data);
-       if (ACPI_SUCCESS(status))
-               kfree(data);
-
-       acpi_bus_detach_private_data(handle);
-}
-#endif
index 632057a4461589c3f7135f4d5feac4a0106041b0..ccfbbab82a157da532fb039f7032d0de38680d0f 100644 (file)
@@ -27,6 +27,8 @@
    OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
    (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and
    (c) 2013  Wolfram Sang <wsa@the-dreams.de>
+   I2C ACPI code Copyright (C) 2014 Intel Corp
+   Author: Lan Tianyu <tianyu.lan@intel.com>
  */
 
 #include <linux/module.h>
@@ -78,6 +80,368 @@ void i2c_transfer_trace_unreg(void)
        static_key_slow_dec(&i2c_trace_msg);
 }
 
+#if defined(CONFIG_ACPI)
+struct acpi_i2c_handler_data {
+       struct acpi_connection_info info;
+       struct i2c_adapter *adapter;
+};
+
+struct gsb_buffer {
+       u8      status;
+       u8      len;
+       union {
+               u16     wdata;
+               u8      bdata;
+               u8      data[0];
+       };
+} __packed;
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+       struct i2c_board_info *info = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               struct acpi_resource_i2c_serialbus *sb;
+
+               sb = &ares->data.i2c_serial_bus;
+               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+                       info->addr = sb->slave_address;
+                       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+                               info->flags |= I2C_CLIENT_TEN;
+               }
+       } else if (info->irq < 0) {
+               struct resource r;
+
+               if (acpi_dev_resource_interrupt(ares, 0, &r))
+                       info->irq = r.start;
+       }
+
+       /* Tell the ACPI core to skip this resource */
+       return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+                                      void *data, void **return_value)
+{
+       struct i2c_adapter *adapter = data;
+       struct list_head resource_list;
+       struct i2c_board_info info;
+       struct acpi_device *adev;
+       int ret;
+
+       if (acpi_bus_get_device(handle, &adev))
+               return AE_OK;
+       if (acpi_bus_get_status(adev) || !adev->status.present)
+               return AE_OK;
+
+       memset(&info, 0, sizeof(info));
+       info.acpi_node.companion = adev;
+       info.irq = -1;
+
+       INIT_LIST_HEAD(&resource_list);
+       ret = acpi_dev_get_resources(adev, &resource_list,
+                                    acpi_i2c_add_resource, &info);
+       acpi_dev_free_resource_list(&resource_list);
+
+       if (ret < 0 || !info.addr)
+               return AE_OK;
+
+       adev->power.flags.ignore_parent = true;
+       strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+       if (!i2c_new_device(adapter, &info)) {
+               adev->power.flags.ignore_parent = false;
+               dev_err(&adapter->dev,
+                       "failed to add I2C device %s from ACPI\n",
+                       dev_name(&adev->dev));
+       }
+
+       return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adap: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+{
+       acpi_handle handle;
+       acpi_status status;
+
+       if (!adap->dev.parent)
+               return;
+
+       handle = ACPI_HANDLE(adap->dev.parent);
+       if (!handle)
+               return;
+
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                                    acpi_i2c_add_device, NULL,
+                                    adap, NULL);
+       if (ACPI_FAILURE(status))
+               dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
+
+#else /* CONFIG_ACPI */
+static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+               u8 cmd, u8 *data, u8 data_len)
+{
+
+       struct i2c_msg msgs[2];
+       int ret;
+       u8 *buffer;
+
+       buffer = kzalloc(data_len, GFP_KERNEL);
+       if (!buffer)
+               return AE_NO_MEMORY;
+
+       msgs[0].addr = client->addr;
+       msgs[0].flags = client->flags;
+       msgs[0].len = 1;
+       msgs[0].buf = &cmd;
+
+       msgs[1].addr = client->addr;
+       msgs[1].flags = client->flags | I2C_M_RD;
+       msgs[1].len = data_len;
+       msgs[1].buf = buffer;
+
+       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret < 0)
+               dev_err(&client->adapter->dev, "i2c read failed\n");
+       else
+               memcpy(data, buffer, data_len);
+
+       kfree(buffer);
+       return ret;
+}
+
+static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+               u8 cmd, u8 *data, u8 data_len)
+{
+
+       struct i2c_msg msgs[1];
+       u8 *buffer;
+       int ret = AE_OK;
+
+       buffer = kzalloc(data_len + 1, GFP_KERNEL);
+       if (!buffer)
+               return AE_NO_MEMORY;
+
+       buffer[0] = cmd;
+       memcpy(buffer + 1, data, data_len);
+
+       msgs[0].addr = client->addr;
+       msgs[0].flags = client->flags;
+       msgs[0].len = data_len + 1;
+       msgs[0].buf = buffer;
+
+       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret < 0)
+               dev_err(&client->adapter->dev, "i2c write failed\n");
+
+       kfree(buffer);
+       return ret;
+}
+
+static acpi_status
+acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+                       u32 bits, u64 *value64,
+                       void *handler_context, void *region_context)
+{
+       struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+       struct acpi_i2c_handler_data *data = handler_context;
+       struct acpi_connection_info *info = &data->info;
+       struct acpi_resource_i2c_serialbus *sb;
+       struct i2c_adapter *adapter = data->adapter;
+       struct i2c_client client;
+       struct acpi_resource *ares;
+       u32 accessor_type = function >> 16;
+       u8 action = function & ACPI_IO_MASK;
+       acpi_status ret = AE_OK;
+       int status;
+
+       ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+       if (ACPI_FAILURE(ret))
+               return ret;
+
+       if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       sb = &ares->data.i2c_serial_bus;
+       if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       memset(&client, 0, sizeof(client));
+       client.adapter = adapter;
+       client.addr = sb->slave_address;
+       client.flags = 0;
+
+       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+               client.flags |= I2C_CLIENT_TEN;
+
+       switch (accessor_type) {
+       case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_byte(&client);
+                       if (status >= 0) {
+                               gsb->bdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_byte(&client, gsb->bdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_BYTE:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_byte_data(&client, command);
+                       if (status >= 0) {
+                               gsb->bdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_byte_data(&client, command,
+                                       gsb->bdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_WORD:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_word_data(&client, command);
+                       if (status >= 0) {
+                               gsb->wdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_word_data(&client, command,
+                                       gsb->wdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_block_data(&client, command,
+                                       gsb->data);
+                       if (status >= 0) {
+                               gsb->len = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_block_data(&client, command,
+                                       gsb->len, gsb->data);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
+               if (action == ACPI_READ) {
+                       status = acpi_gsb_i2c_read_bytes(&client, command,
+                                       gsb->data, info->access_length);
+                       if (status > 0)
+                               status = 0;
+               } else {
+                       status = acpi_gsb_i2c_write_bytes(&client, command,
+                                       gsb->data, info->access_length);
+               }
+               break;
+
+       default:
+               pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       gsb->status = status;
+
+ err:
+       ACPI_FREE(ares);
+       return ret;
+}
+
+
+static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{
+       acpi_handle handle;
+       struct acpi_i2c_handler_data *data;
+       acpi_status status;
+
+       if (!adapter->dev.parent)
+               return -ENODEV;
+
+       handle = ACPI_HANDLE(adapter->dev.parent);
+
+       if (!handle)
+               return -ENODEV;
+
+       data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->adapter = adapter;
+       status = acpi_bus_attach_private_data(handle, (void *)data);
+       if (ACPI_FAILURE(status)) {
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       status = acpi_install_address_space_handler(handle,
+                               ACPI_ADR_SPACE_GSBUS,
+                               &acpi_i2c_space_handler,
+                               NULL,
+                               data);
+       if (ACPI_FAILURE(status)) {
+               dev_err(&adapter->dev, "Error installing i2c space handler\n");
+               acpi_bus_detach_private_data(handle);
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{
+       acpi_handle handle;
+       struct acpi_i2c_handler_data *data;
+       acpi_status status;
+
+       if (!adapter->dev.parent)
+               return;
+
+       handle = ACPI_HANDLE(adapter->dev.parent);
+
+       if (!handle)
+               return;
+
+       acpi_remove_address_space_handler(handle,
+                               ACPI_ADR_SPACE_GSBUS,
+                               &acpi_i2c_space_handler);
+
+       status = acpi_bus_get_private_data(handle, (void **)&data);
+       if (ACPI_SUCCESS(status))
+               kfree(data);
+
+       acpi_bus_detach_private_data(handle);
+}
+#else /* CONFIG_ACPI_I2C_OPREGION */
+static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{ }
+
+static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{ return 0; }
+#endif /* CONFIG_ACPI_I2C_OPREGION */
+
 /* ------------------------------------------------------------------------- */
 
 static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
index a077cc86421b5eb24519ab25a61ebb41105af5e8..19100fddd2edb7e8c11f367ae4061346fcd10089 100644 (file)
@@ -571,7 +571,7 @@ static int bma180_probe(struct i2c_client *client,
        trig->ops = &bma180_trigger_ops;
        iio_trigger_set_drvdata(trig, indio_dev);
        data->trig = trig;
-       indio_dev->trig = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        ret = iio_trigger_register(trig);
        if (ret)
index c55b81f7f9702b6955b7f01a31e24b3ade3d47fa..d10bd0c97233fa2351954e8bc6fdcd0b299e61dd 100644 (file)
@@ -472,7 +472,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = sigma_delta->trig;
+       indio_dev->trig = iio_trigger_get(sigma_delta->trig);
 
        return 0;
 
index 772e869c280ed2c7a275ba3516fa66b997d00379..7eadaf16adc1eb97ed2e49562558294012db2ad0 100644 (file)
@@ -196,6 +196,7 @@ struct at91_adc_state {
        bool                    done;
        int                     irq;
        u16                     last_value;
+       int                     chnb;
        struct mutex            lock;
        u8                      num_channels;
        void __iomem            *reg_base;
@@ -274,7 +275,7 @@ void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
                disable_irq_nosync(irq);
                iio_trigger_poll(idev->trig);
        } else {
-               st->last_value = at91_adc_readl(st, AT91_ADC_LCDR);
+               st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
                st->done = true;
                wake_up_interruptible(&st->wq_data_avail);
        }
@@ -351,7 +352,7 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
        unsigned int reg;
 
        status &= at91_adc_readl(st, AT91_ADC_IMR);
-       if (status & st->registers->drdy_mask)
+       if (status & GENMASK(st->num_channels - 1, 0))
                handle_adc_eoc_trigger(irq, idev);
 
        if (status & AT91RL_ADC_IER_PEN) {
@@ -418,7 +419,7 @@ static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private)
                AT91_ADC_IER_YRDY |
                AT91_ADC_IER_PRDY;
 
-       if (status & st->registers->drdy_mask)
+       if (status & GENMASK(st->num_channels - 1, 0))
                handle_adc_eoc_trigger(irq, idev);
 
        if (status & AT91_ADC_IER_PEN) {
@@ -689,9 +690,10 @@ static int at91_adc_read_raw(struct iio_dev *idev,
        case IIO_CHAN_INFO_RAW:
                mutex_lock(&st->lock);
 
+               st->chnb = chan->channel;
                at91_adc_writel(st, AT91_ADC_CHER,
                                AT91_ADC_CH(chan->channel));
-               at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask);
+               at91_adc_writel(st, AT91_ADC_IER, BIT(chan->channel));
                at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START);
 
                ret = wait_event_interruptible_timeout(st->wq_data_avail,
@@ -708,7 +710,7 @@ static int at91_adc_read_raw(struct iio_dev *idev,
 
                at91_adc_writel(st, AT91_ADC_CHDR,
                                AT91_ADC_CH(chan->channel));
-               at91_adc_writel(st, AT91_ADC_IDR, st->registers->drdy_mask);
+               at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
                st->last_value = 0;
                st->done = false;
index fd2745c629436f6fa51c15a851c6b7ace8d14400..626b397497670a7c76cd2241370503df4c0afb09 100644 (file)
@@ -1126,7 +1126,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
                                chan->address = XADC_REG_VPVN;
                        } else {
                                chan->scan_index = 15 + reg;
-                               chan->scan_index = XADC_REG_VAUX(reg - 1);
+                               chan->address = XADC_REG_VAUX(reg - 1);
                        }
                        num_channels++;
                        chan++;
index a3109a6f4d86569a86d445f8a967fe4b3a4d8cab..92068cdbf8c7a6e1423c889f6de791cc25141071 100644 (file)
@@ -122,7 +122,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
                dev_err(&indio_dev->dev, "Trigger Register Failed\n");
                goto error_free_trig;
        }
-       indio_dev->trig = attrb->trigger = trig;
+       attrb->trigger = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        return ret;
 
index 8fc3a97eb266e6302367653248017c3b6078a021..8d8ca6f1e16a5d602b182355fc1f051738dc48c2 100644 (file)
@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
                goto iio_trigger_register_error;
        }
-       indio_dev->trig = sdata->trig;
+       indio_dev->trig = iio_trigger_get(sdata->trig);
 
        return 0;
 
index e3b3c5084070d808ca88939e3f13257472ba3e55..eef50e91f17cfe56930357e4404eafd11a7836d4 100644 (file)
@@ -132,7 +132,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
 
index 03b9372c1212a99c5a6a2452cc081f7395a259ca..926fccea8de0233bc955da2ea7c5dc903098f76f 100644 (file)
@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
        if (ret)
                goto error_free_irq;
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
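
Editor's note: the iio_trigger_get() conversions in the hunks above all follow the same rule: a device that stores a long-lived pointer to a refcounted trigger must own a reference, otherwise the trigger's creator can drop the last count and free it underneath the device. A generic refcount illustration (not the IIO API itself):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("freed\n");
		free(o);
	}
}

int main(void)
{
	struct obj *trig = calloc(1, sizeof(*trig));
	trig->refs = 1;                            /* creator's reference */

	struct obj *default_trig = obj_get(trig);  /* device takes its own reference */

	obj_put(trig);          /* creator drops its count ... */
	obj_put(default_trig);  /* ... object survives until the device lets go */
	return 0;
}
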
 
index c7497009d60ab7b81358b85d9ecd4057642276e7..f0846108d0067b4a979f3c3b86624b6e211cfa76 100644 (file)
@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                        index = of_property_match_string(np, "io-channel-names",
                                                         name);
                chan = of_iio_channel_get(np, index);
-               if (!IS_ERR(chan))
+               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
                        break;
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
index a4b64130ac2f862996aa28522176bcee8ef904e3..68cae86dbd29e2ef7d93e111d05fa8178e451469 100644 (file)
@@ -42,7 +42,8 @@
 #define ST_MAGN_FS_AVL_5600MG                  5600
 #define ST_MAGN_FS_AVL_8000MG                  8000
 #define ST_MAGN_FS_AVL_8100MG                  8100
-#define ST_MAGN_FS_AVL_10000MG                 10000
+#define ST_MAGN_FS_AVL_12000MG                 12000
+#define ST_MAGN_FS_AVL_16000MG                 16000
 
 /* CUSTOM VALUES FOR SENSOR 1 */
 #define ST_MAGN_1_WAI_EXP                      0x3c
 #define ST_MAGN_1_FS_AVL_4700_VAL              0x05
 #define ST_MAGN_1_FS_AVL_5600_VAL              0x06
 #define ST_MAGN_1_FS_AVL_8100_VAL              0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          1100
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          855
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          670
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          450
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          400
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          330
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          230
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           980
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           760
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           600
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           400
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           355
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           295
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           205
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          909
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          1169
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          1492
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          2222
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          2500
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          3030
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          4347
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           1020
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           1315
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           1666
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           2500
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           2816
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           3389
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           4878
 #define ST_MAGN_1_MULTIREAD_BIT                        false
 
 /* CUSTOM VALUES FOR SENSOR 2 */
 #define ST_MAGN_2_FS_MASK                      0x60
 #define ST_MAGN_2_FS_AVL_4000_VAL              0x00
 #define ST_MAGN_2_FS_AVL_8000_VAL              0x01
-#define ST_MAGN_2_FS_AVL_10000_VAL             0x02
-#define ST_MAGN_2_FS_AVL_4000_GAIN             430
-#define ST_MAGN_2_FS_AVL_8000_GAIN             230
-#define ST_MAGN_2_FS_AVL_10000_GAIN            230
+#define ST_MAGN_2_FS_AVL_12000_VAL             0x02
+#define ST_MAGN_2_FS_AVL_16000_VAL             0x03
+#define ST_MAGN_2_FS_AVL_4000_GAIN             146
+#define ST_MAGN_2_FS_AVL_8000_GAIN             292
+#define ST_MAGN_2_FS_AVL_12000_GAIN            438
+#define ST_MAGN_2_FS_AVL_16000_GAIN            584
 #define ST_MAGN_2_MULTIREAD_BIT                        false
 #define ST_MAGN_2_OUT_X_L_ADDR                 0x28
 #define ST_MAGN_2_OUT_Y_L_ADDR                 0x2a
@@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = {
                                        .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
                                },
                                [2] = {
-                                       .num = ST_MAGN_FS_AVL_10000MG,
-                                       .value = ST_MAGN_2_FS_AVL_10000_VAL,
-                                       .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
+                                       .num = ST_MAGN_FS_AVL_12000MG,
+                                       .value = ST_MAGN_2_FS_AVL_12000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+                               },
+                               [3] = {
+                                       .num = ST_MAGN_FS_AVL_16000MG,
+                                       .value = ST_MAGN_2_FS_AVL_16000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
                                },
                        },
                },
index a3a2e9c1639b1c9373c3b41d38fa27c9b106af14..df0c4f605a219c1f3f5cd5faabeadc705622b65e 100644 (file)
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
+       umem->pid       = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
@@ -198,6 +199,7 @@ out:
        if (ret < 0) {
                if (need_release)
                        __ib_umem_release(context->device, umem, 0);
+               put_pid(umem->pid);
                kfree(umem);
        } else
                current->mm->pinned_vm = locked;
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem)
 {
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
+       struct task_struct *task;
        unsigned long diff;
 
        __ib_umem_release(umem->context->device, umem, 1);
 
-       mm = get_task_mm(current);
-       if (!mm) {
-               kfree(umem);
-               return;
-       }
+       task = get_pid_task(umem->pid, PIDTYPE_PID);
+       put_pid(umem->pid);
+       if (!task)
+               goto out;
+       mm = get_task_mm(task);
+       put_task_struct(task);
+       if (!mm)
+               goto out;
 
        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem)
        } else
                down_write(&mm->mmap_sem);
 
-       current->mm->pinned_vm -= diff;
+       mm->pinned_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
+out:
        kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
index e7bee46868d1cc8be8da98eee2e427de6ca37b20..abd97247443e437ffff62e8238610657ca355394 100644 (file)
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
        dst->packet_life_time   = src->packet_life_time;
        dst->preference         = src->preference;
        dst->packet_life_time_selector = src->packet_life_time_selector;
+
+       memset(dst->smac, 0, sizeof(dst->smac));
+       memset(dst->dmac, 0, sizeof(dst->dmac));
+       dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
index dc66c450691602f572b53e53a8dbacfca16e9c4b..1da1252dcdb302ecb84ff0ec26f175cad908ecc4 100644 (file)
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p, struct vm_area_struct **vma)
+                                 struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __ipath_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index e1e558a3d692bbd8b907a84efe222481b0ae1a8e..bda5994ceb68c910097c712f5d44306840eb8727 100644 (file)
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
        return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+       int ib_ports = 0;
+       int i;
+
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+               ib_ports++;
+
+       return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
+       int have_ib_ports;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
        memset(props, 0, sizeof *props);
 
+       have_ib_ports = num_ib_ports(dev->dev);
+
        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-       if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+       if (dev->dev->caps.max_gso_sz &&
+           (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+           (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
        if (!mqp->port)
                return 0;
 
-       spin_lock(&mdev->iboe.lock);
+       spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
-       spin_unlock(&mdev->iboe.lock);
+       spin_unlock_bh(&mdev->iboe.lock);
 
        if (ndev) {
                ret = 1;
@@ -1089,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
        return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+                                   u64 *reg_id)
+{
+       void *ib_flow;
+       union ib_flow_spec *ib_spec;
+       struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+       int err = 0;
+
+       if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+               return 0; /* do nothing */
+
+       ib_flow = flow_attr + 1;
+       ib_spec = (union ib_flow_spec *)ib_flow;
+
+       if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+               return 0; /* do nothing */
+
+       err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+                                   flow_attr->port, qp->qp_num,
+                                   MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+                                   reg_id);
+       return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
@@ -1136,6 +1177,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                i++;
        }
 
+       if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+               err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+               if (err)
+                       goto err_free;
+       }
+
        return &mflow->ibflow;
 
 err_free:
@@ -1262,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
-               spin_lock(&mdev->iboe.lock);
+               spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
-               spin_unlock(&mdev->iboe.lock);
+               spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
@@ -1387,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1417,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("reset gid table failed\n");
@@ -1551,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                return 0;
 
        iboe = &ibdev->iboe;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
@@ -1561,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                        update_gid_table(ibdev, port, gid,
                                         event == NETDEV_DOWN, 0);
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        return 0;
 
 }
@@ -1634,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
        new_smac = mlx4_mac_to_u64(dev->dev_addr);
        read_unlock(&dev_base_lock);
 
+       atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+       /* no need to update QP1 or register a MAC in non-SRIOV mode */
+       if (!mlx4_is_mfunc(ibdev->dev))
+               return;
+
        mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
        qp = ibdev->qp1_proxy[port - 1];
        if (qp) {
                int new_smac_index;
-               u64 old_smac = qp->pri.smac;
+               u64 old_smac;
                struct mlx4_update_qp_params update_params;
 
+               mutex_lock(&qp->mutex);
+               old_smac = qp->pri.smac;
                if (new_smac == old_smac)
                        goto unlock;
 
@@ -1650,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                        goto unlock;
 
                update_params.smac_index = new_smac_index;
-               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+               if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
                                   &update_params)) {
                        release_mac = new_smac;
                        goto unlock;
                }
-
+               /* if old port was zero, no mac was yet registered for this QP */
+               if (qp->pri.smac_port)
+                       release_mac = old_smac;
                qp->pri.smac = new_smac;
+               qp->pri.smac_port = port;
                qp->pri.smac_index = new_smac_index;
-
-               release_mac = old_smac;
        }
 
 unlock:
-       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
        if (release_mac != MLX4_IB_INVALID_MAC)
                mlx4_unregister_mac(ibdev->dev, port, release_mac);
+       if (qp)
+               mutex_unlock(&qp->mutex);
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1676,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
        struct inet6_dev *in6_dev;
        union ib_gid  *pgid;
        struct inet6_ifaddr *ifp;
+       union ib_gid default_gid;
 #endif
        union ib_gid gid;
 
@@ -1696,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
                in_dev_put(in_dev);
        }
 #if IS_ENABLED(CONFIG_IPV6)
+       mlx4_make_default_gid(dev, &default_gid);
        /* IPv6 gids */
        in6_dev = in6_dev_get(dev);
        if (in6_dev) {
                read_lock_bh(&in6_dev->lock);
                list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                        pgid = (union ib_gid *)&ifp->addr;
+                       if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+                               continue;
                        update_gid_table(ibdev, port, pgid, 0, 0);
                }
                read_unlock_bh(&in6_dev->lock);
@@ -1723,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        struct  net_device *dev;
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        int i;
+       int err = 0;
 
-       for (i = 1; i <= ibdev->num_ports; ++i)
-               if (reset_gid_table(ibdev, i))
-                       return -1;
+       for (i = 1; i <= ibdev->num_ports; ++i) {
+               if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+                   IB_LINK_LAYER_ETHERNET) {
+                       err = reset_gid_table(ibdev, i);
+                       if (err)
+                               goto out;
+               }
+       }
 
        read_lock(&dev_base_lock);
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for_each_netdev(&init_net, dev) {
                u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-               if (port)
+               /* port will be non-zero only for ETH ports */
+               if (port) {
+                       mlx4_ib_set_default_gid(ibdev, dev, port);
                        mlx4_ib_get_dev_addr(dev, ibdev, port);
+               }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        read_unlock(&dev_base_lock);
-
-       return 0;
+out:
+       return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1754,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
        iboe = &ibdev->iboe;
 
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                enum ib_port_state      port_state = IB_PORT_NOP;
                struct net_device *old_master = iboe->masters[port - 1];
@@ -1786,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
                        mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               } else {
-                       reset_gid_table(ibdev, port);
-               }
-               /* if using bonding/team and a slave port is down, we don't the bond IP
-                * based gids in the table since flows that select port by gid may get
-                * the down port.
-                */
-               if (curr_master && (port_state == IB_PORT_DOWN)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               }
-               /* if bonding is used it is possible that we add it to masters
-                * only after IP address is assigned to the net bonding
-                * interface.
-               */
-               if (curr_master && (old_master != curr_master)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-               }
+                       if (curr_master) {
+                               /* if using bonding/team and a slave port is down, we
+                                * don't want the bond IP based gids in the table since
+                                * flows that select port by gid may get the down port.
+                               */
+                               if (port_state == IB_PORT_DOWN) {
+                                       reset_gid_table(ibdev, port);
+                                       mlx4_ib_set_default_gid(ibdev,
+                                                               curr_netdev,
+                                                               port);
+                               } else {
+                                       /* gids from the upper dev (bond/team)
+                                        * should appear in port's gid table
+                                       */
+                                       mlx4_ib_get_dev_addr(curr_master,
+                                                            ibdev, port);
+                               }
+                       }
+                       /* if bonding is used it is possible that we add it to
+                        * masters only after IP address is assigned to the
+                        * net bonding interface.
+                       */
+                       if (curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+                       }
 
-               if (!curr_master && (old_master != curr_master)) {
+                       if (!curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+                       }
+               } else {
                        reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
                }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 
        if (update_qps_port > 0)
                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2156,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        goto err_steer_free_bitmap;
        }
 
+       for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+               atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
        if (ib_register_device(&ibdev->ib_dev, NULL))
                goto err_steer_free_bitmap;
 
@@ -2192,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        }
                }
 #endif
-               for (i = 1 ; i <= ibdev->num_ports ; ++i)
-                       reset_gid_table(ibdev, i);
-               rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-               rtnl_unlock();
-               mlx4_ib_init_gid_table(ibdev);
+               if (mlx4_ib_init_gid_table(ibdev))
+                       goto err_notif;
        }
 
        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;
 
+       ibdev->ib_active = false;
+       flush_workqueue(wq);
+
        mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
index e8cad3926bfc350ba3d505327a29d19136e6ff10..6eb743f65f6f5633eb126ae5fe7ac768299fe77d 100644 (file)
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
        spinlock_t              lock;
        struct net_device      *netdevs[MLX4_MAX_PORTS];
        struct net_device      *masters[MLX4_MAX_PORTS];
+       atomic64_t              mac[MLX4_MAX_PORTS];
        struct notifier_block   nb;
        struct notifier_block   nb_inet;
        struct notifier_block   nb_inet6;
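The iboe.mac[] member added above, filled by the atomic64_set() calls in the main.c hunks and read back with atomic64_read() in the QP code, caches each port's MAC so readers no longer have to chase the netdev pointer under a lock. A C11 stdatomic analogue of that lock-free publish/read pattern (this is not the kernel atomic64_t API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 2

/* one cached MAC per port: updated by the netdev event path,
 * read lock-free by the data path */
static _Atomic uint64_t port_mac[MAX_PORTS];

static void update_port_mac(int port, uint64_t new_mac)
{
	atomic_store(&port_mac[port - 1], new_mac);
}

static uint64_t read_port_mac(int port)
{
	return atomic_load(&port_mac[port - 1]);
}

int main(void)
{
	update_port_mac(1, 0x001122334455ULL);
	printf("port 1 mac: %012llx\n",
	       (unsigned long long)read_port_mac(1));
	return 0;
}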
index 9b0e80e59b087af9cac810977fbcefc7a939ffc6..8f9325cfc85d3638dbb407573e30d3388cf3e934 100644 (file)
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
+                       /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);
 
-               mmr->mmr.iova       = virt_addr;
-               mmr->mmr.size       = length;
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
+               mmr->mmr.iova       = virt_addr;
+               mmr->mmr.size       = length;
 
                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+       if (!err && flags & IB_MR_REREG_ACCESS)
+               mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
index 67780452f0cfb85d72fc98bfd0faefbe005fb32a..9c5150c3cb311a147195eab8f8a7b9290e6629fd 100644 (file)
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
                        pr_warn("modify QP %06x to RESET failed.\n",
                               qp->mqp.qpn);
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 * If one was already assigned, but the new mac differs,
                 * unregister the old one and register the new one.
                */
-               if (!smac_info->smac || smac_info->smac != smac) {
+               if ((!smac_info->smac && !smac_info->smac_port) ||
+                   smac_info->smac != smac) {
                        /* register candidate now, unreg if needed, after success */
                        smac_index = mlx4_register_mac(dev->dev, port, smac);
                        if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
                                    struct mlx4_qp_context *context)
 {
-       struct net_device *ndev;
        u64 u64_mac;
        int smac_index;
 
-
-       ndev = dev->iboe.netdevs[qp->port - 1];
-       if (ndev) {
-               smac = ndev->dev_addr;
-               u64_mac = mlx4_mac_to_u64(smac);
-       } else {
-               u64_mac = dev->dev->caps.def_mac[qp->port];
-       }
+       u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
        context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-       if (!qp->pri.smac) {
+       if (!qp->pri.smac && !qp->pri.smac_port) {
                smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
                if (smac_index >= 0) {
                        qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        int steer_qp = 0;
        int err = -EINVAL;
 
+       /* APM is not supported under RoCE */
+       if (attr_mask & IB_QP_ALT_PATH &&
+           rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+           IB_LINK_LAYER_ETHERNET)
+               return -ENOTSUPP;
+
        context = kzalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                }
        }
 
-       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
                context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
                                        MLX4_IB_LINK_TYPE_ETH;
+               if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+                       /* set QP to receive both tunneled & non-tunneled packets */
+                       if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+                               context->srqn = cpu_to_be32(7 << 28);
+               }
+       }
 
        if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
                int is_eth = rdma_port_get_link_layer(
@@ -1780,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                        if (qp->flags & MLX4_IB_QP_NETIF)
                                mlx4_ib_steer_qp_reg(dev, qp, 0);
                }
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1806,11 +1813,12 @@ out:
        if (err && steer_qp)
                mlx4_ib_steer_qp_reg(dev, qp, 0);
        kfree(context);
-       if (qp->pri.candidate_smac) {
+       if (qp->pri.candidate_smac ||
+           (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
                if (err) {
                        mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
                } else {
-                       if (qp->pri.smac)
+                       if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
                                mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = qp->pri.candidate_smac;
                        qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2083,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
        return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+       int i;
+
+       for (i = ETH_ALEN; i; i--) {
+               dst_mac[i - 1] = src_mac & 0xff;
+               src_mac >>= 8;
+       }
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                            void *wqe, unsigned *mlx_seg_len)
 {
@@ -2197,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        }
 
        if (is_eth) {
-               u8 *smac;
                struct in6_addr in6;
 
                u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2210,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
                memcpy(&in6, sgid.raw, sizeof(in6));
 
-               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-                       smac = to_mdev(sqp->qp.ibqp.device)->
-                               iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-               else    /* use the src mac of the tunnel */
-                       smac = ah->av.eth.s_mac;
-               memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                       u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+                       u8 smac[ETH_ALEN];
+
+                       mlx4_u64_to_smac(smac, mac);
+                       memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+               } else {
+                       /* use the src mac of the tunnel */
+                       memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+               }
+
                if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                        mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
                if (!is_vlan) {
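The new mlx4_u64_to_smac() helper above unpacks a MAC address held in a u64 into its six on-wire bytes, most significant byte first. The same byte-order logic as a standalone program (ETH_ALEN is hard-coded to 6 here since linux/if_ether.h is not pulled in):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void u64_to_mac(uint8_t *dst_mac, uint64_t src_mac)
{
	/* fill from the last byte backwards, so dst_mac[0] ends up
	 * holding the most significant of the six bytes */
	for (int i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	u64_to_mac(mac, 0x001122334455ULL);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}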
index 40f8536c10b00e60db462ab9eaef26fd7a46945c..ac02ce4e804045afa74df9cb34b1f50d1b146361 100644 (file)
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                               struct ib_ah_attr *attr, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
        int status = 0;
        u16 vlan_tag; bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        memset(&eth, 0, sizeof(eth));
        memset(&grh, 0, sizeof(grh));
 
-       ah->sgid_index = attr->grh.sgid_index;
-
+       /* VLAN */
        vlan_tag = attr->vlan_id;
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
        }
+       /* MAC */
        memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-       memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
        status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
        if (status)
                return status;
-       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-                       (union ib_gid *)&grh.sgid[0]);
-       if (status)
-               return status;
+       ah->sgid_index = attr->grh.sgid_index;
+       memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
        grh.tclass_flow = cpu_to_be32((6 << 28) |
                        (attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* 0x1b is next header value in GRH */
        grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
                        (0x1b << 8) | attr->grh.hop_limit);
-
-       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+       /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
        if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+       union ib_gid sgid;
+       u8 zmac[ETH_ALEN];
 
        if (!(attr->ah_flags & IB_AH_GRH))
                return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        status = ocrdma_alloc_av(dev, ah);
        if (status)
                goto av_err;
-       status = set_av_attr(dev, ah, attr, pd->id);
+
+       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+       if (status) {
+               pr_err("%s(): Failed to query sgid, status = %d\n",
+                     __func__, status);
+               goto av_conf_err;
+       }
+
+       memset(&zmac, 0, ETH_ALEN);
+       if (pd->uctx &&
+           memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+               status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+                                        attr->dmac, &attr->vlan_id);
+               if (status) {
+                       pr_err("%s(): Failed to resolve dmac from gid. "
+                               "status = %d\n", __func__, status);
+                       goto av_conf_err;
+               }
+       }
+
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
        if (status)
                goto av_conf_err;
 
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
        struct ocrdma_av *av = ah->av;
        struct ocrdma_grh *grh;
        attr->ah_flags |= IB_AH_GRH;
-       if (ah->av->valid & Bit(1)) {
+       if (ah->av->valid & OCRDMA_AV_VALID) {
                grh = (struct ocrdma_grh *)((u8 *)ah->av +
                                sizeof(struct ocrdma_eth_vlan));
                attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
index acb434d169036e0781eef89a10d6c72c32936554..8f5f2577f28855eae258b278484a2bdda2eaf396 100644 (file)
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-       attr->max_fast_reg_page_list_len = 0;
+       attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
        if (cq->first_arm) {
                ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
                cq->first_arm = false;
-               goto skip_defer;
        }
-       cq->deferred_arm = true;
 
-skip_defer:
+       cq->deferred_arm = true;
        cq->deferred_sol = sol_needed;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
 
index 799a0c3bffc4a0e3bc10570e456630d1bea43f4f..6abd3ed3cd51ecf2c0a21927b0cf4894f7c48941 100644 (file)
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
        struct qib_qp_iter *iter;
        loff_t n = *pos;
 
+       rcu_read_lock();
        iter = qib_qp_iter_init(s->private);
        if (!iter)
                return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-       /* nothing for now */
+       rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
index 7fcc150d603c4d341034d6c4b9e9847588dfb69f..6ddc0264aad2779ef327161c14a2ee9aef2e543f 100644 (file)
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
        struct qib_qp *pqp = iter->qp;
        struct qib_qp *qp;
 
-       rcu_read_lock();
        for (; n < dev->qp_table_size; n++) {
                if (pqp)
                        qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
                        qp = rcu_dereference(dev->qp_table[n]);
                pqp = qp;
                if (qp) {
-                       if (iter->qp)
-                               atomic_dec(&iter->qp->refcount);
-                       atomic_inc(&qp->refcount);
-                       rcu_read_unlock();
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
-       rcu_read_unlock();
-       if (iter->qp)
-               atomic_dec(&iter->qp->refcount);
        return ret;
 }
 
index 2bc1d2b96298dd5d59029cb141d83a715ab51f54..74f90b2619f6f5169cc4c326e94ba5343b067753 100644 (file)
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-                               struct page **p, struct vm_area_struct **vma)
+                               struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __qib_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index 3edce617c31b22de7e4e7f5d2478c5bd71ba3f46..d7562beb542367faf1b93d7ba66e8ef879c73bf4 100644 (file)
@@ -131,6 +131,12 @@ struct ipoib_cb {
        u8                      hwaddr[INFINIBAND_ALEN];
 };
 
+static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+       return (struct ipoib_cb *)skb->cb;
+}
+
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
        struct ib_sa_mcmember_rec mcmember;
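The ipoib_skb_cb() accessor above pairs the cast of skb->cb with a BUILD_BUG_ON so the private control block can never silently outgrow the scratch area. A userspace analogue using C11 _Static_assert; the 48-byte buffer mirrors sizeof(skb->cb) but is an assumption here, and the field layout is invented:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fake_skb {
	char cb[48];	/* scratch area, same idea as skb->cb */
};

struct my_cb {
	uint64_t hwaddr_hi;
	uint32_t hwaddr_lo;
	uint32_t flags;
};

/* refuse to compile if the private struct no longer fits */
_Static_assert(sizeof(struct my_cb) <= sizeof(((struct fake_skb *)0)->cb),
	       "my_cb does not fit in the cb scratch area");

static struct my_cb *skb_cb(struct fake_skb *skb)
{
	return (struct my_cb *)skb->cb;
}

int main(void)
{
	struct fake_skb skb;

	memset(&skb, 0, sizeof(skb));
	skb_cb(&skb)->flags = 1;
	printf("flags = %u\n", skb_cb(&skb)->flags);
	return 0;
}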
index 1310acf6bf923786a09692bed47b5fcb49ca7e45..13e6e04315920d7f361129945332cdbf41f8a930 100644 (file)
@@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
        struct ipoib_header *header;
        unsigned long flags;
 
@@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
                             const void *daddr, const void *saddr, unsigned len)
 {
        struct ipoib_header *header;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
 
        header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 
index d4e005720d0181729096b4fb8a81aa21498f023b..ffb83b5f7e805e411f1506d66a53f8465b90c439 100644 (file)
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                          port_attr.state);
                return;
        }
+       priv->local_lid = port_attr.lid;
 
        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_query_gid() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-       {
-               struct ib_port_attr attr;
-
-               if (!ib_query_port(priv->ca, priv->port, &attr))
-                       priv->local_lid = attr.lid;
-               else
-                       ipoib_warn(priv, "ib_query_port failed\n");
-       }
-
        if (!priv->broadcast) {
                struct ipoib_mcast *broadcast;
 
index 61ee91d883806322f60c79eaae6ec1dde865ad54..93ce62fe1594345cd66bb19cd116cebdc2329490 100644 (file)
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
                     int is_leading)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_session *session;
        struct iser_conn *ib_conn;
        struct iscsi_endpoint *ep;
        int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        }
        ib_conn = ep->dd_data;
 
-       session = conn->session;
-       if (iser_alloc_rx_descriptors(ib_conn, session))
-               return -ENOMEM;
+       mutex_lock(&ib_conn->state_mutex);
+       if (ib_conn->state != ISER_CONN_UP) {
+               error = -EINVAL;
+               iser_err("iser_conn %p state is %d, teardown started\n",
+                        ib_conn, ib_conn->state);
+               goto out;
+       }
+
+       error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+       if (error)
+               goto out;
 
        /* binds the iSER connection retrieved from the previously
         * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        conn->dd_data = ib_conn;
        ib_conn->iscsi_conn = conn;
 
-       return 0;
+out:
+       mutex_unlock(&ib_conn->state_mutex);
+       return error;
 }
 
 static int
index c877dad381cb95acb1979e158536b281265f5573..9f0e0e34d6ca587af2c627c20fc141f6c0422b38 100644 (file)
@@ -69,7 +69,7 @@
 
 #define DRV_NAME       "iser"
 #define PFX            DRV_NAME ": "
-#define DRV_VER                "1.4"
+#define DRV_VER                "1.4.1"
 
 #define iser_dbg(fmt, arg...)                          \
        do {                                            \
index 3ef167f97d6fd8468a266b3d45198ae72e30b198..3bfec4bbda5263ee636171c79f12f8548615823e 100644 (file)
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
        struct iser_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr = &device->dev_attr;
-       int ret, i, j;
+       int ret, i;
 
        ret = ib_query_device(device->ib_device, dev_attr);
        if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
                                          iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_RX_CQ_LEN, i);
-               if (IS_ERR(device->rx_cq[i]))
+               if (IS_ERR(device->rx_cq[i])) {
+                       device->rx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                device->tx_cq[i] = ib_create_cq(device->ib_device,
                                          NULL, iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_TX_CQ_LEN, i);
 
-               if (IS_ERR(device->tx_cq[i]))
+               if (IS_ERR(device->tx_cq[i])) {
+                       device->tx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
                        goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
        ib_dereg_mr(device->mr);
 dma_mr_err:
-       for (j = 0; j < device->cqs_used; j++)
-               tasklet_kill(&device->cq_tasklet[j]);
+       for (i = 0; i < device->cqs_used; i++)
+               tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-       for (j = 0; j < i; j++) {
-               if (device->tx_cq[j])
-                       ib_destroy_cq(device->tx_cq[j]);
-               if (device->rx_cq[j])
-                       ib_destroy_cq(device->rx_cq[j]);
+       for (i = 0; i < device->cqs_used; i++) {
+               if (device->tx_cq[i])
+                       ib_destroy_cq(device->tx_cq[i]);
+               if (device->rx_cq[i])
+                       ib_destroy_cq(device->rx_cq[i]);
        }
        ib_dealloc_pd(device->pd);
 pd_err:
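The error-path rework above NULLs a CQ slot as soon as its creation fails, so a single cleanup loop over all slots can destroy exactly what was created, and the extra j counter goes away. A compact userspace sketch of that NULL-safe goto-cleanup pattern (malloc/free stand in for ib_create_cq/ib_destroy_cq):

#include <stdio.h>
#include <stdlib.h>

#define NUM_RES 4

int main(void)
{
	void *res[NUM_RES] = { NULL };
	int i, err = 0;

	for (i = 0; i < NUM_RES; i++) {
		/* simulate a failure on the third allocation */
		res[i] = (i == 2) ? NULL : malloc(64);
		if (!res[i]) {
			err = -1;	/* leave the slot NULL and bail out */
			goto cleanup;
		}
	}

	printf("all resources created\n");

cleanup:
	/* one loop serves both the error and the normal teardown path:
	 * only slots that were really created get freed */
	for (i = 0; i < NUM_RES; i++)
		if (res[i])
			free(res[i]);

	return err ? 1 : 0;
}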
index d4c7928a0f36143844f3ca0b4735f9c22a1c148a..da8ff124762a3362eab5f26c1cd52f4bcfdac099 100644 (file)
@@ -586,17 +586,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
-       kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
-       isert_conn->responder_resources = event->param.conn.responder_resources;
-       isert_conn->initiator_depth = event->param.conn.initiator_depth;
-       pr_debug("Using responder_resources: %u initiator_depth: %u\n",
-                isert_conn->responder_resources, isert_conn->initiator_depth);
 
        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
@@ -643,6 +638,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_rsp_dma_map;
        }
 
+       /* Set max inflight RDMA READ requests */
+       isert_conn->initiator_depth = min_t(u8,
+                               event->param.conn.initiator_depth,
+                               device->dev_attr.max_qp_init_rd_atom);
+       pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+
        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
@@ -746,7 +747,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       return;
+       struct isert_conn *isert_conn = cma_id->context;
+
+       kref_get(&isert_conn->conn_kref);
 }
 
 static void
@@ -798,7 +801,6 @@ isert_disconnect_work(struct work_struct *work)
 
 wake_up:
        complete(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
 }
 
 static void
@@ -3067,7 +3069,6 @@ isert_rdma_accept(struct isert_conn *isert_conn)
        int ret;
 
        memset(&cp, 0, sizeof(struct rdma_conn_param));
-       cp.responder_resources = isert_conn->responder_resources;
        cp.initiator_depth = isert_conn->initiator_depth;
        cp.retry_count = 7;
        cp.rnr_retry_count = 7;
@@ -3215,7 +3216,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        pr_debug("isert_wait_conn: Starting \n");
 
        mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->conn_cm_id) {
+       if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
@@ -3234,6 +3235,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        wait_for_completion(&isert_conn->conn_wait_comp_err);
 
        wait_for_completion(&isert_conn->conn_wait);
+       isert_put_conn(isert_conn);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
index 2dd1d0dd4f7de03233752e57704ff60c17e6d992..6f5d79569136f3adf0bf9248988b107426b85e9c 100644 (file)
@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
-               },
-               .callback = atkbd_deactivate_fixup,
-       },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
                },
                .callback = atkbd_deactivate_fixup,
        },
index 180b184ab90f31935e1fa9c71bd7eb8fa4e68593..d70b65a14ced24ffe09e298562de54ab9b9a52e4 100644 (file)
@@ -33,8 +33,8 @@
 #define CAP1106_REG_SENSOR_CONFIG      0x22
 #define CAP1106_REG_SENSOR_CONFIG2     0x23
 #define CAP1106_REG_SAMPLING_CONFIG    0x24
-#define CAP1106_REG_CALIBRATION                0x25
-#define CAP1106_REG_INT_ENABLE         0x26
+#define CAP1106_REG_CALIBRATION                0x26
+#define CAP1106_REG_INT_ENABLE         0x27
 #define CAP1106_REG_REPEAT_RATE                0x28
 #define CAP1106_REG_MT_CONFIG          0x2a
 #define CAP1106_REG_MT_PATTERN_CONFIG  0x2b
index 8d2e19e81e1e59b63c31eccd989b441f503e8706..e651fa692afef3959f0d48ee844a40c3ccb2aba0 100644 (file)
@@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev,
        }
 
        if (pdata->clustered_irq > 0) {
-               err = request_irq(pdata->clustered_irq,
+               err = request_any_context_irq(pdata->clustered_irq,
                                matrix_keypad_interrupt,
                                pdata->clustered_irq_flags,
                                "matrix-keypad", keypad);
-               if (err) {
+               if (err < 0) {
                        dev_err(&pdev->dev,
                                "Unable to acquire clustered interrupt\n");
                        goto err_free_rows;
                }
        } else {
                for (i = 0; i < pdata->num_row_gpios; i++) {
-                       err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+                       err = request_any_context_irq(
+                                       gpio_to_irq(pdata->row_gpios[i]),
                                        matrix_keypad_interrupt,
                                        IRQF_TRIGGER_RISING |
                                        IRQF_TRIGGER_FALLING,
                                        "matrix-keypad", keypad);
-                       if (err) {
+                       if (err < 0) {
                                dev_err(&pdev->dev,
                                        "Unable to acquire interrupt for GPIO line %i\n",
                                        pdata->row_gpios[i]);
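The hunks above switch to request_any_context_irq(), which returns a non-negative context code on success rather than zero, so the error check has to become err < 0. A tiny illustration of why a plain if (err) test would misfire; the stand-in function and its success values only mimic the kernel convention:

#include <stdio.h>

#define CTX_HARDIRQ 0	/* stand-ins for the positive success codes */
#define CTX_NESTED  1

/* pretend: non-negative context on success, negative errno on failure */
static int fake_request_any_context_irq(int irq)
{
	return (irq > 0) ? CTX_NESTED : -22;
}

int main(void)
{
	int err = fake_request_any_context_irq(5);

	if (err)		/* WRONG: a positive success value looks like failure */
		printf("old check: reports failure (err=%d)\n", err);

	if (err < 0)		/* RIGHT: only negative values are errors */
		printf("new check: failure\n");
	else
		printf("new check: success, context=%d\n", err);

	return 0;
}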
index a956b980ee73f16fb8eeeae2c9f19bbce0e049f9..35a49bf572273e464399944c05e8072a7b06e138 100644 (file)
@@ -2373,6 +2373,10 @@ int alps_init(struct psmouse *psmouse)
        dev2->keybit[BIT_WORD(BTN_LEFT)] =
                BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
 
+       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
+       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
+
        if (input_register_device(priv->dev2))
                goto init_fail;
 
index da51738eb59e712067cd5d4db8b85a3e112739fa..06fc6e76ffbe0cf725053b8a1bdb3faaefa1c629 100644 (file)
@@ -1331,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
        if (param[1] == 0)
                return true;
 
+       /*
+        * Some models have a revision higher than 20, meaning param[2] may
+        * be 10 or 20; skip the rates check for these.
+        */
+       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+               return true;
+
        for (i = 0; i < ARRAY_SIZE(rates); i++)
                if (param[2] == rates[i])
                        return false;
@@ -1607,6 +1614,10 @@ int elantech_init(struct psmouse *psmouse)
                tp_dev->keybit[BIT_WORD(BTN_LEFT)] =
                        BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
                        BIT_MASK(BTN_RIGHT);
+
+               __set_bit(INPUT_PROP_POINTER, tp_dev->propbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit);
+
                error = input_register_device(etd->tp_dev);
                if (error < 0)
                        goto init_fail_tp_reg;
index cff065f6261cf31a3188226f14fe28e9291f6770..b4e1f014ddc2297affeda8abc73c15954fdfabf0 100644 (file)
@@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
        __set_bit(REL_X, input_dev->relbit);
        __set_bit(REL_Y, input_dev->relbit);
 
+       __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        psmouse->set_rate = psmouse_set_rate;
        psmouse->set_resolution = psmouse_set_resolution;
        psmouse->poll = psmouse_poll;
index e8573c68f77e900e83e054c720443328adfa2f7f..fd23181c1fb741112f1cc33fa0fc6111284823c3 100644 (file)
@@ -629,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                         ((buf[0] & 0x04) >> 1) |
                         ((buf[3] & 0x04) >> 2));
 
+               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+                   hw->w == 2) {
+                       synaptics_parse_agm(buf, priv, hw);
+                       return 1;
+               }
+
+               hw->x = (((buf[3] & 0x10) << 8) |
+                        ((buf[1] & 0x0f) << 8) |
+                        buf[4]);
+               hw->y = (((buf[3] & 0x20) << 7) |
+                        ((buf[1] & 0xf0) << 4) |
+                        buf[5]);
+               hw->z = buf[2];
+
                hw->left  = (buf[0] & 0x01) ? 1 : 0;
                hw->right = (buf[0] & 0x02) ? 1 : 0;
 
-               if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+               if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+                       /*
+                        * ForcePads, like Clickpads, use middle button
+                        * bits to report primary button clicks.
+                        * Unfortunately they report the primary button
+                        * not only when the user presses on the pad above
+                        * a certain threshold, but also when there is more
+                        * than one finger on the touchpad, which interferes
+                        * with our multi-finger gestures.
+                        */
+                       if (hw->z == 0) {
+                               /* No contacts */
+                               priv->press = priv->report_press = false;
+                       } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+                               /*
+                                * Single-finger touch with pressure above
+                                * the threshold. If pressure stays long
+                                * enough, we'll start reporting the primary
+                                * button. We rely on the device continuing
+                                * to send data even if the finger does not
+                                * move.
+                                */
+                               if  (!priv->press) {
+                                       priv->press_start = jiffies;
+                                       priv->press = true;
+                               } else if (time_after(jiffies,
+                                               priv->press_start +
+                                                       msecs_to_jiffies(50))) {
+                                       priv->report_press = true;
+                               }
+                       } else {
+                               priv->press = false;
+                       }
+
+                       hw->left = priv->report_press;
+
+               } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                        /*
                         * Clickpad's button is transmitted as middle button,
                         * however, since it is primary button, we will report
@@ -651,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                        hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
                }
 
-               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
-                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
-                   hw->w == 2) {
-                       synaptics_parse_agm(buf, priv, hw);
-                       return 1;
-               }
-
-               hw->x = (((buf[3] & 0x10) << 8) |
-                        ((buf[1] & 0x0f) << 8) |
-                        buf[4]);
-               hw->y = (((buf[3] & 0x20) << 7) |
-                        ((buf[1] & 0xf0) << 4) |
-                        buf[5]);
-               hw->z = buf[2];
-
                if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
                    ((buf[0] ^ buf[3]) & 0x02)) {
                        switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
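
The ForcePad handling above only reports the primary button once pressure has been sustained for 50 ms. A minimal sketch of that debounce in plain C, assuming a monotonic clock in place of jiffies and a pre-decoded single_finger_pressed flag; the names here are illustrative, not driver code:

#include <stdbool.h>
#include <time.h>

struct forcepad_state {
        struct timespec press_start;
        bool press;
        bool report_press;
};

static long ms_since(const struct timespec *t)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - t->tv_sec) * 1000 +
               (now.tv_nsec - t->tv_nsec) / 1000000;
}

/* z is the reported pressure; single_finger_pressed is decoded from the packet */
static bool forcepad_left_button(struct forcepad_state *s, int z,
                                 bool single_finger_pressed)
{
        if (z == 0) {
                /* no contacts: forget any pending press */
                s->press = s->report_press = false;
        } else if (single_finger_pressed) {
                if (!s->press) {
                        clock_gettime(CLOCK_MONOTONIC, &s->press_start);
                        s->press = true;
                } else if (ms_since(&s->press_start) > 50) {
                        s->report_press = true;
                }
        } else {
                /* more than one finger: do not start reporting a click */
                s->press = false;
        }

        return s->report_press;
}
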
index e594af0b264b7f147569d77fe5a1d84825ed1a9e..fb2e076738ae3bee7fdd844de7e93dd3fa5146c6 100644 (file)
  * 2   0x08    image sensor            image sensor tracks 5 fingers, but only
  *                                     reports 2.
  * 2   0x20    report min              query 0x0f gives min coord reported
+ * 2   0x80    forcepad                forcepad is a variant of clickpad that
+ *                                     does not have physical buttons but rather
+ *                                     uses pressure above a certain threshold
+ *                                     to report primary clicks. Forcepads also
+ *                                     have the clickpad bit set.
  */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
@@ -86,6 +91,7 @@
 #define SYN_CAP_ADV_GESTURE(ex0c)      ((ex0c) & 0x080000)
 #define SYN_CAP_REDUCED_FILTERING(ex0c)        ((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)     ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c)         ((ex0c) & 0x008000)
 
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)           ((m) & (1 << 7))
@@ -177,6 +183,11 @@ struct synaptics_data {
         */
        struct synaptics_hw_state agm;
        bool agm_pending;                       /* new AGM packet received */
+
+       /* ForcePad handling */
+       unsigned long                           press_start;
+       bool                                    press;
+       bool                                    report_press;
 };
 
 void synaptics_module_init(void);
index e122bda16aabddcf555f84dd06eed4dfbebca031..6bcc0189c1c99898dc39bd5ced920f60c13bd553 100644 (file)
@@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(EV_REL, input_dev->evbit);
                __set_bit(REL_X, input_dev->relbit);
                __set_bit(REL_Y, input_dev->relbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit);
                input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0);
        } else {
                input_set_abs_params(input_dev, ABS_X,
@@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
        }
 
+       if (synusb->flags & SYNUSB_TOUCHSCREEN)
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+       else
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        __set_bit(BTN_LEFT, input_dev->keybit);
        __set_bit(BTN_RIGHT, input_dev->keybit);
        __set_bit(BTN_MIDDLE, input_dev->keybit);
index ca843b6cf6bd9f536c984cf455c83952ff8e2a18..30c8b6998808fa452a19e437c33bd7e9db8a0888 100644 (file)
@@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
        if ((button_info & 0x0f) >= 3)
                __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
 
+       __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
+       __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
+
        trackpoint_defaults(psmouse->private);
 
        error = trackpoint_power_on_reset(&psmouse->ps2dev);
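
The INPUT_PROP_POINTER and INPUT_PROP_POINTING_STICK bits set throughout these input patches are exported to userspace through the evdev EVIOCGPROP ioctl. A minimal sketch of querying them, assuming an event node path on the command line and with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define LONG_BITS (8 * sizeof(unsigned long))
#define TEST_BIT(bit, arr) \
        (((arr)[(bit) / LONG_BITS] >> ((bit) % LONG_BITS)) & 1UL)

int main(int argc, char **argv)
{
        unsigned long props[(INPUT_PROP_CNT + LONG_BITS - 1) / LONG_BITS];
        const char *path = argc > 1 ? argv[1] : "/dev/input/event0";
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 1;

        memset(props, 0, sizeof(props));
        if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
                return 1;

        printf("%s: pointer=%lu pointing_stick=%lu\n", path,
               TEST_BIT(INPUT_PROP_POINTER, props),
               TEST_BIT(INPUT_PROP_POINTING_STICK, props));
        return 0;
}
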
index 136b7b204f56c8d111812559384d6acd8e732514..40b7d6c0ff17f72012654b541aa4325e639a2cf6 100644 (file)
@@ -465,6 +465,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Asus X450LCP */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+               },
+       },
+       {
+               /* Avatar AVIU-145A6 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+               },
+       },
        { }
 };
 
@@ -608,6 +622,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Fujitsu U574 laptop */
+               /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+               },
+       },
        { }
 };
 
index 3807c3e971cca79e6ff42b745409ac418487bb0c..f5a98af3b325c1f3257759e31a450c67b2fa00a5 100644 (file)
@@ -1254,6 +1254,8 @@ static int __init i8042_create_aux_port(int idx)
        } else {
                snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
                snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, idx + 1);
+               strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+                       sizeof(serio->firmware_id));
        }
 
        port->serio = serio;
index 0cb7ef59071b792a350a914e88f4e9be7da34efc..69175b8253468cf5d2319659b1496e3ac32d5b10 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/serio.h>
 #include <linux/tty.h>
+#include <linux/compat.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Input device TTY line discipline");
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
        return 0;
 }
 
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+       struct serport *serport = tty->disc_data;
+
+       serport->id.proto = type & 0x000000ff;
+       serport->id.id    = (type & 0x0000ff00) >> 8;
+       serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
 /*
  * serport_ldisc_ioctl() allows setting the port protocol and device ID
  */
 
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+                              unsigned int cmd, unsigned long arg)
 {
-       struct serport *serport = (struct serport*) tty->disc_data;
-       unsigned long type;
-
        if (cmd == SPIOCSTYPE) {
+               unsigned long type;
+
                if (get_user(type, (unsigned long __user *) arg))
                        return -EFAULT;
 
-               serport->id.proto = type & 0x000000ff;
-               serport->id.id    = (type & 0x0000ff00) >> 8;
-               serport->id.extra = (type & 0x00ff0000) >> 16;
+               serport_set_type(tty, type);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE      _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+                                      struct file *file,
+                                      unsigned int cmd, unsigned long arg)
+{
+       if (cmd == COMPAT_SPIOCSTYPE) {
+               void __user *uarg = compat_ptr(arg);
+               compat_ulong_t compat_type;
+
+               if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+                       return -EFAULT;
 
+               serport_set_type(tty, compat_type);
                return 0;
        }
 
        return -EINVAL;
 }
+#endif
 
 static void serport_ldisc_write_wakeup(struct tty_struct * tty)
 {
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
        .close =        serport_ldisc_close,
        .read =         serport_ldisc_read,
        .ioctl =        serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
        .receive_buf =  serport_ldisc_receive,
        .write_wakeup = serport_ldisc_write_wakeup
 };
index db178ed2b47ee282c27a5f424f45df168b99a091..aaacf8bfa61fded723f48bcb18dc59ebbd10ab86 100644 (file)
@@ -837,7 +837,12 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
        count = data->msg_buf[0];
 
        if (count == 0) {
-               dev_warn(dev, "Interrupt triggered but zero messages\n");
+               /*
+                * This condition is caused by the CHG line being configured
+                * in Mode 0. It results in unnecessary I2C operations but it
+                * is benign.
+                */
+               dev_dbg(dev, "Interrupt triggered but zero messages\n");
                return IRQ_NONE;
        } else if (count > data->max_reportid) {
                dev_err(dev, "T44 count %d exceeded max report id\n", count);
@@ -1374,11 +1379,16 @@ static int mxt_get_info(struct mxt_data *data)
        return 0;
 }
 
-static void mxt_free_object_table(struct mxt_data *data)
+static void mxt_free_input_device(struct mxt_data *data)
 {
-       input_unregister_device(data->input_dev);
-       data->input_dev = NULL;
+       if (data->input_dev) {
+               input_unregister_device(data->input_dev);
+               data->input_dev = NULL;
+       }
+}
 
+static void mxt_free_object_table(struct mxt_data *data)
+{
        kfree(data->object_table);
        data->object_table = NULL;
        kfree(data->msg_buf);
@@ -1957,11 +1967,13 @@ static int mxt_load_fw(struct device *dev, const char *fn)
                ret = mxt_lookup_bootloader_address(data, 0);
                if (ret)
                        goto release_firmware;
+
+               mxt_free_input_device(data);
+               mxt_free_object_table(data);
        } else {
                enable_irq(data->irq);
        }
 
-       mxt_free_object_table(data);
        reinit_completion(&data->bl_completion);
 
        ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
@@ -2210,6 +2222,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
        return 0;
 
 err_free_object:
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
 err_free_irq:
        free_irq(client->irq, data);
@@ -2224,7 +2237,7 @@ static int mxt_remove(struct i2c_client *client)
 
        sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
        free_irq(data->irq, data);
-       input_unregister_device(data->input_dev);
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
        kfree(data);
 
index 16b52115c27fd49fc9cce79735df7fcad18347c5..705ffa1e064a9ceb391abf51b28343bc3b2ed7ce 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index 7405353199d7353e31e1bfac4b2f39db3b222fc3..572a5a64face0bff34c0099fa35213442b8a0860 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index ca18d6d42a9be2a90389882f737a811d2c63dc54..a83cc2a2a2cab5464bd7b4f85d514b00afe580f2 100644 (file)
 #define ID0_CTTW                       (1 << 14)
 #define ID0_NUMIRPT_SHIFT              16
 #define ID0_NUMIRPT_MASK               0xff
+#define ID0_NUMSIDB_SHIFT              9
+#define ID0_NUMSIDB_MASK               0xf
 #define ID0_NUMSMRG_SHIFT              0
 #define ID0_NUMSMRG_MASK               0xff
 
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
        master->of_node                 = masterspec->np;
        master->cfg.num_streamids       = masterspec->args_count;
 
-       for (i = 0; i < master->cfg.num_streamids; ++i)
-               master->cfg.streamids[i] = masterspec->args[i];
+       for (i = 0; i < master->cfg.num_streamids; ++i) {
+               u16 streamid = masterspec->args[i];
 
+               if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+                    (streamid >= smmu->num_mapping_groups)) {
+                       dev_err(dev,
+                               "stream ID for master device %s greater than maximum allowed (%d)\n",
+                               masterspec->np->name, smmu->num_mapping_groups);
+                       return -ERANGE;
+               }
+               master->cfg.streamids[i] = streamid;
+       }
        return insert_smmu_master(smmu, master);
 }
 
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 
        if (fsr & FSR_IGN)
                dev_err_ratelimited(smmu->dev,
-                                   "Unexpected context fault (fsr 0x%u)\n",
+                                   "Unexpected context fault (fsr 0x%x)\n",
                                    fsr);
 
        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
                        break;
                case 42:
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 42:
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        reg |= TTBCR_EAE |
              (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
              (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
-             (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+       if (!stage1)
+               reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
        /* MAIR0 (stage-1 only) */
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
 {
-       int irq, ret, start;
+       int irq, start, ret = 0;
+       unsigned long flags;
        struct arm_smmu_domain *smmu_domain = domain->priv;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
+       spin_lock_irqsave(&smmu_domain->lock, flags);
+       if (smmu_domain->smmu)
+               goto out_unlock;
+
        if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
                /*
                 * We will likely want to change this if/when KVM gets
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (IS_ERR_VALUE(ret))
-               return ret;
+               goto out_unlock;
 
        cfg->cbndx = ret;
        if (smmu->version == 1) {
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                cfg->irptndx = cfg->cbndx;
        }
 
+       ACCESS_ONCE(smmu_domain->smmu) = smmu;
+       arm_smmu_init_context_bank(smmu_domain);
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
+
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
        ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
                          "arm-smmu-context-fault", domain);
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
                cfg->irptndx = INVALID_IRPTNDX;
-               goto out_free_context;
        }
 
-       smmu_domain->smmu = smmu;
-       arm_smmu_init_context_bank(smmu_domain);
        return 0;
 
-out_free_context:
-       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+out_unlock:
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
        return ret;
 }
 
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd)
 {
        pgtable_t table = pmd_pgtable(*pmd);
 
-       pgtable_page_dtor(table);
        __free_page(table);
 }
 
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        struct arm_smmu_smr *smrs = cfg->smrs;
 
+       if (!smrs)
+               return;
+
        /* Invalidate the SMRs before freeing back to the allocator */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u8 idx = smrs[i].idx;
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        kfree(smrs);
 }
 
-static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
-                                          struct arm_smmu_master_cfg *cfg)
-{
-       int i;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-       for (i = 0; i < cfg->num_streamids; ++i) {
-               u16 sid = cfg->streamids[i];
-
-               writel_relaxed(S2CR_TYPE_BYPASS,
-                              gr0_base + ARM_SMMU_GR0_S2CR(sid));
-       }
-}
-
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                                      struct arm_smmu_master_cfg *cfg)
 {
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
                                          struct arm_smmu_master_cfg *cfg)
 {
+       int i;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
        /*
         * We *must* clear the S2CR first, because freeing the SMR means
         * that it can be re-allocated immediately.
         */
-       arm_smmu_bypass_stream_mapping(smmu, cfg);
+       for (i = 0; i < cfg->num_streamids; ++i) {
+               u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                              gr0_base + ARM_SMMU_GR0_S2CR(idx));
+       }
+
        arm_smmu_master_free_smrs(smmu, cfg);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-       int ret = -EINVAL;
+       int ret;
        struct arm_smmu_domain *smmu_domain = domain->priv;
-       struct arm_smmu_device *smmu;
+       struct arm_smmu_device *smmu, *dom_smmu;
        struct arm_smmu_master_cfg *cfg;
-       unsigned long flags;
 
        smmu = dev_get_master_dev(dev)->archdata.iommu;
        if (!smmu) {
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
         */
-       spin_lock_irqsave(&smmu_domain->lock, flags);
-       if (!smmu_domain->smmu) {
+       dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+       if (!dom_smmu) {
                /* Now that we have a master, we can finalise the domain */
                ret = arm_smmu_init_domain_context(domain, smmu);
                if (IS_ERR_VALUE(ret))
-                       goto err_unlock;
-       } else if (smmu_domain->smmu != smmu) {
+                       return ret;
+
+               dom_smmu = smmu_domain->smmu;
+       }
+
+       if (dom_smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-                       dev_name(smmu_domain->smmu->dev),
-                       dev_name(smmu->dev));
-               goto err_unlock;
+                       dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+               return -EINVAL;
        }
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
        /* Looks ok, so add the device to the domain */
        cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                return -ENODEV;
 
        return arm_smmu_domain_add_master(smmu_domain, cfg);
-
-err_unlock:
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
-       return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                        return -ENOMEM;
 
                arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
-               if (!pgtable_page_ctor(table)) {
-                       __free_page(table);
-                       return -ENOMEM;
-               }
                pmd_populate(NULL, pmd, table);
                arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
        }
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
        /* Mark all SMRn as invalid and all S2CRn as bypass */
        for (i = 0; i < smmu->num_mapping_groups; ++i) {
-               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+               writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
                writel_relaxed(S2CR_TYPE_BYPASS,
                        gr0_base + ARM_SMMU_GR0_S2CR(i));
        }
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev,
                           "\tstream matching with %u register groups, mask 0x%x",
                           smmu->num_mapping_groups, mask);
+       } else {
+               smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+                                          ID0_NUMSIDB_MASK;
        }
 
        /* ID1 */
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * Stage-1 output limited by stage-2 input size due to pgd
         * allocation (PTRS_PER_PGD).
         */
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+               smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
 #else
-       smmu->s1_output_size = min(32UL, size);
+               smmu->s1_output_size = min(32UL, size);
 #endif
+       } else {
+               smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
+                                            size);
+       }
 
        /* The stage-2 output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                smmu->irqs[i] = irq;
        }
 
+       err = arm_smmu_device_cfg_probe(smmu);
+       if (err)
+               return err;
+
        i = 0;
        smmu->masters = RB_ROOT;
        while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        }
        dev_notice(dev, "registered %d master devices\n", i);
 
-       err = arm_smmu_device_cfg_probe(smmu);
-       if (err)
-               goto out_put_masters;
-
        parse_driver_options(smmu);
 
        if (smmu->version > 1 &&
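
The reworked arm_smmu_init_domain_context()/arm_smmu_attach_dev() pair publishes smmu_domain->smmu exactly once under the domain lock and lets later callers do a single racy read (ACCESS_ONCE in the patch) to skip the slow path. A simplified sketch of that once-init pattern using pthreads; the types and names are illustrative, and the kernel code additionally relies on the spinlock and barrier semantics:

#include <pthread.h>
#include <stddef.h>

struct domain {
        pthread_mutex_t lock;   /* assumed initialised by the caller */
        void *backend;          /* stands in for smmu_domain->smmu */
};

static void *domain_get_backend(struct domain *d, void *candidate)
{
        /* ACCESS_ONCE-style read: one non-cached load of the pointer */
        void *backend = *(void * volatile *)&d->backend;

        if (!backend) {
                pthread_mutex_lock(&d->lock);
                if (!d->backend)
                        d->backend = candidate; /* publish exactly once */
                backend = d->backend;
                pthread_mutex_unlock(&d->lock);
        }
        return backend;
}
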
index 60ab474bfff38e58ba1e246d7da6ca3a201677cb..06d268abe951bebd946deac82cf092f3f7a67ee3 100644 (file)
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void)
                                       andd->device_name);
                                continue;
                        }
-                       acpi_bus_get_device(h, &adev);
-                       if (!adev) {
+                       if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
index 61d1dafa242d7d01bfff167f610774aff547b1eb..56feed7cec15ca77ef23df1e214e79f58d74935c 100644 (file)
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev)
        struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
-       int ret, len;
+       int ret = 0, len;
 
        /*
         * For platform devices we allocate a separate group for
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev)
        if (IS_ERR(group))
                return PTR_ERR(group);
 
-       ret = iommu_group_add_device(group, dev);
+       /*
+        * Check if the device has already been added to an iommu group.
+        * The group could already have been created for a PCI device in
+        * the iommu_group_get_for_dev() path.
+        */
+       if (!dev->iommu_group)
+               ret = iommu_group_add_device(group, dev);
 
        iommu_group_put(group);
        return ret;
index ac4adb337038bc990077a2c0557d1f07d8b1de8e..0639b9274b114ac24de51446a636d43f8497557e 100644 (file)
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
  */
 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-       struct iommu_group *group = ERR_PTR(-EIO);
+       struct iommu_group *group;
        int ret;
 
        group = iommu_group_get(dev);
        if (group)
                return group;
 
-       if (dev_is_pci(dev))
-               group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+       if (!dev_is_pci(dev))
+               return ERR_PTR(-EINVAL);
+
+       group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
 
        if (IS_ERR(group))
                return group;
index f8636a650cf6489a16d21b09df4e6c49127c5299..5945223b73fa2ceba647649e6c16c84fc5447d08 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
index 85c2985d8bcb5040aeae9c343cacc1352fafe125..bbbaf5de65d2cda705949998e4e3a62ce813496e 100644 (file)
@@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-reserved",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid reserved entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
@@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-skip",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid skip entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
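
Both hunks above fix the same off-by-one: for a routing table with max entries, the valid indices are 0 through max - 1, so an index equal to max has to be rejected as well. In isolation:

#include <stdbool.h>

/* Valid indices for a table of `max` entries are 0 .. max - 1. */
static bool entry_is_valid(unsigned int entry, unsigned int max)
{
        return entry < max;     /* i.e. reject entry >= max, not just entry > max */
}
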
index 57eaa5a0b1e39fb4154946700bf83dcfddc4f9b1..a0698b4f03037f8c8717e34b896b6f454fe28e59 100644 (file)
@@ -36,7 +36,7 @@
 struct gic_chip_data {
        void __iomem            *dist_base;
        void __iomem            **redist_base;
-       void __percpu __iomem   **rdist;
+       void __iomem * __percpu *rdist;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     redist_regions;
@@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void)
 }
 
 /* Low level accessors */
-static u64 gic_read_iar(void)
+static u64 __maybe_unused gic_read_iar(void)
 {
        u64 irqstat;
 
@@ -112,24 +112,24 @@ static u64 gic_read_iar(void)
        return irqstat;
 }
 
-static void gic_write_pmr(u64 val)
+static void __maybe_unused gic_write_pmr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
 }
 
-static void gic_write_ctlr(u64 val)
+static void __maybe_unused gic_write_ctlr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_grpen1(u64 val)
+static void __maybe_unused gic_write_grpen1(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_sgi1r(u64 val)
+static void __maybe_unused gic_write_sgi1r(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
 }
@@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
        rwp_wait();
 }
 
-static int gic_peek_irq(struct irq_data *d, u32 offset)
-{
-       u32 mask = 1 << (gic_irq(d) % 32);
-       void __iomem *base;
-
-       if (gic_irq_in_rdist(d))
-               base = gic_data_rdist_sgi_base();
-       else
-               base = gic_data.dist_base;
-
-       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
-}
-
 static void gic_mask_irq(struct irq_data *d)
 {
        gic_poke_irq(d, GICD_ICENABLER);
@@ -401,6 +388,19 @@ static void gic_cpu_init(void)
 }
 
 #ifdef CONFIG_SMP
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d))
+               base = gic_data_rdist_sgi_base();
+       else
+               base = gic_data.dist_base;
+
+       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
 static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
 {
index 4b959e606fe8d64494af44f9f8f66f6f069c22d3..dda6dbc23565aa48593d5e3112da13a58acdc3f7 100644 (file)
@@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d,
        return 0;
 }
 
-const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
        .map = gic_routable_irq_domain_map,
        .unmap = gic_routable_irq_domain_unmap,
        .xlate = gic_routable_irq_domain_xlate,
index 1af40ee209e2b9c0d46b9873f3bc32fd78234cd7..7130505c242550f3321448094e5b110987ca5969 100644 (file)
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
        struct cache *cache = mg->cache;
 
        if (mg->writeback) {
-               cell_defer(cache, mg->old_ocell, false);
                clear_dirty(cache, mg->old_oblock, mg->cblock);
+               cell_defer(cache, mg->old_ocell, false);
                cleanup_migration(mg);
                return;
 
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                }
 
        } else {
+               clear_dirty(cache, mg->new_oblock, mg->cblock);
                if (mg->requeue_holder)
                        cell_defer(cache, mg->new_ocell, true);
                else {
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
-               clear_dirty(cache, mg->new_oblock, mg->cblock);
                cleanup_migration(mg);
        }
 }
index d7690f86fdb9a6202edfa6d8fa5f95517fe8e75b..55de4f6f7eaf47a4633e231d0943f3766f66aee5 100644 (file)
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
        has_nonrot_disk = 0;
        choose_next_idle = 0;
 
-       if (conf->mddev->recovery_cp < MaxSector &&
-           (this_sector + sectors >= conf->next_resync))
-               choose_first = 1;
-       else
-               choose_first = 0;
+       choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
 
        for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
  *    there is no normal IO happening.  It must arrange to call
  *    lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf)
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
        spin_lock_irq(&conf->resync_lock);
 
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
 
        /* block any new IO from starting */
        conf->barrier++;
+       conf->next_resync = sector_nr;
 
        /* For these conditions we must wait:
         * A: while the array is in frozen state
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
         * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
         *    next resync will reach the window that normal bios are
         *    handling.
+        * D: while there are any active requests in the current window.
         */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                            conf->barrier < RESYNC_DEPTH &&
+                           conf->current_window_requests == 0 &&
                            (conf->start_next_window >=
                             conf->next_resync + RESYNC_SECTORS),
                            conf->resync_lock);
 
+       conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
        BUG_ON(conf->barrier <= 0);
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->barrier--;
+       conf->nr_pending--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
 }
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
        if (conf->array_frozen || !bio)
                wait = true;
        else if (conf->barrier && bio_data_dir(bio) == WRITE) {
-               if (conf->next_resync < RESYNC_WINDOW_SECTORS)
-                       wait = true;
-               else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
-                               >= bio_end_sector(bio)) ||
-                        (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                               <= bio->bi_iter.bi_sector))
+               if ((conf->mddev->curr_resync_completed
+                    >= bio_end_sector(bio)) ||
+                   (conf->next_resync + NEXT_NORMALIO_DISTANCE
+                    <= bio->bi_iter.bi_sector))
                        wait = false;
                else
                        wait = true;
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
        }
 
        if (bio && bio_data_dir(bio) == WRITE) {
-               if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                   <= bio->bi_iter.bi_sector) {
+               if (bio->bi_iter.bi_sector >=
+                   conf->mddev->curr_resync_completed) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
@@ -1186,6 +1185,7 @@ read_again:
                                   atomic_read(&bitmap->behind_writes) == 0);
                }
                r1_bio->read_disk = rdisk;
+               r1_bio->start_next_window = 0;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
        mempool_destroy(conf->r1buf_pool);
        conf->r1buf_pool = NULL;
 
+       spin_lock_irq(&conf->resync_lock);
        conf->next_resync = 0;
        conf->start_next_window = MaxSector;
+       conf->current_window_requests +=
+               conf->next_window_requests;
+       conf->next_window_requests = 0;
+       spin_unlock_irq(&conf->resync_lock);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags))
+                           !test_bit(Faulty, &rdev->flags))
                                r1_sync_page_io(rdev, sect, s,
                                                conf->tmppage, WRITE);
                }
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
+                           !test_bit(Faulty, &rdev->flags)) {
                                if (r1_sync_page_io(rdev, sect, s,
                                                    conf->tmppage, READ)) {
                                        atomic_add(s, &rdev->corrected_errors);
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
-       raise_barrier(conf);
 
-       conf->next_resync = sector_nr;
+       raise_barrier(conf, sector_nr);
 
        rcu_read_lock();
        /*
index 183588b11fc1d261e173aff9c9e1e63b175ed120..9f0fbecd1eb544200c0b2c16ed7a4ea218239a5c 100644 (file)
 #define cpu_to_group(cpu) cpu_to_node(cpu)
 #define ANY_GROUP NUMA_NO_NODE
 
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+                "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
 static struct workqueue_struct *raid5_wq;
 /*
  * Stripe cache
@@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev)
                mddev->queue->limits.discard_granularity = stripe;
                /*
                 * unaligned part of discard request will be ignored, so can't
-                * guarantee discard_zerors_data
+                * guarantee discard_zeroes_data
                 */
                mddev->queue->limits.discard_zeroes_data = 0;
 
@@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev)
                            !bdev_get_queue(rdev->bdev)->
                                                limits.discard_zeroes_data)
                                discard_supported = false;
+                       /* Unfortunately, discard_zeroes_data is not currently
+                        * a guarantee - just a hint.  So we only allow DISCARD
+                        * if the sysadmin has confirmed that only safe devices
+                        * are in use by setting a module parameter.
+                        */
+                       if (!devices_handle_discard_safely) {
+                               if (discard_supported) {
+                                       pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+                                       pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+                               }
+                               discard_supported = false;
+                       }
                }
 
                if (discard_supported &&
index f60bad491eb693f501f3abd652c9c2ab5b48e146..3c89fcbc621efe8ab78c865cc00fd84af81dd018 100644 (file)
@@ -182,7 +182,6 @@ config MEDIA_SUBDRV_AUTOSELECT
        depends on HAS_IOMEM
        select I2C
        select I2C_MUX
-       select SPI
        default y
        help
          By default, a media driver auto-selects all possible ancillary
index 103ef6bad2e2267685f13b296d3012b9d447aa29..be763150b8aad014692871091e5cf702e333d3df 100644 (file)
@@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
 {
        struct v4l2_ctrl_config cfg;
 
+       memset(&cfg, 0, sizeof(cfg));
        cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags);
        cfg.ops = &cx2341x_ops;
        cfg.id = id;
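
The memset() added above matters because the v4l2_ctrl_config lives on the stack and cx2341x_ctrl_fill() only writes some of its fields; the rest would otherwise reach the control framework as garbage. The general pattern, sketched with an illustrative struct:

#include <string.h>

struct cfg {
        int id;
        unsigned int flags;     /* read by the callee; garbage unless cleared */
        const void *ops;
};

static void fill_cfg(struct cfg *c, int id, const void *ops)
{
        memset(c, 0, sizeof(*c));       /* or: struct cfg c = { 0 }; at declaration */
        c->id = id;
        c->ops = ops;
}
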
index 5135a096bfa689c1f326f8c42125385c16e61936..12ce19c98ded144bc37da6d7498edb5381b0d5ee 100644 (file)
 #define USB_PID_PCTV_400E                              0x020f
 #define USB_PID_PCTV_450E                              0x0222
 #define USB_PID_PCTV_452E                              0x021f
+#define USB_PID_PCTV_78E                               0x025a
+#define USB_PID_PCTV_79E                               0x0262
 #define USB_PID_REALTEK_RTL2831U                       0x2831
 #define USB_PID_REALTEK_RTL2832U                       0x2832
 #define USB_PID_TECHNOTREND_CONNECT_S2_3600            0x3007
index be4bec2a96408b896b9e3e6e62a4a8ac70c4d566..5c90ea683a7e732d3431c8973e43b4ffea649c55 100644 (file)
@@ -314,6 +314,19 @@ static int af9033_init(struct dvb_frontend *fe)
                        goto err;
        }
 
+       /* feed clock to RF tuner */
+       switch (state->cfg.tuner) {
+       case AF9033_TUNER_IT9135_38:
+       case AF9033_TUNER_IT9135_51:
+       case AF9033_TUNER_IT9135_52:
+       case AF9033_TUNER_IT9135_60:
+       case AF9033_TUNER_IT9135_61:
+       case AF9033_TUNER_IT9135_62:
+               ret = af9033_wr_reg(state, 0x80fba8, 0x00);
+               if (ret < 0)
+                       goto err;
+       }
+
        /* settings for TS interface */
        if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
                ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
index fc2ad581e3028dd775636c61fa5cca1ed1ae790f..ded7b67d7526cab8b9cd364dfe6ce19fdb3da36e 100644 (file)
@@ -1418,7 +1418,7 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x800068, 0x0a },
        { 0x80006a, 0x03 },
        { 0x800070, 0x0a },
-       { 0x800071, 0x05 },
+       { 0x800071, 0x0a },
        { 0x800072, 0x02 },
        { 0x800075, 0x8c },
        { 0x800076, 0x8c },
@@ -1484,7 +1484,6 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x800104, 0x02 },
        { 0x800105, 0xbe },
        { 0x800106, 0x00 },
-       { 0x800109, 0x02 },
        { 0x800115, 0x0a },
        { 0x800116, 0x03 },
        { 0x80011a, 0xbe },
@@ -1510,7 +1509,6 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x80014b, 0x8c },
        { 0x80014d, 0xac },
        { 0x80014e, 0xc6 },
-       { 0x80014f, 0x03 },
        { 0x800151, 0x1e },
        { 0x800153, 0xbc },
        { 0x800178, 0x09 },
@@ -1522,9 +1520,10 @@ static const struct reg_val tuner_init_it9135_60[] = {
        { 0x80018d, 0x5f },
        { 0x80018f, 0xa0 },
        { 0x800190, 0x5a },
-       { 0x80ed02, 0xff },
-       { 0x80ee42, 0xff },
-       { 0x80ee82, 0xff },
+       { 0x800191, 0x00 },
+       { 0x80ed02, 0x40 },
+       { 0x80ee42, 0x40 },
+       { 0x80ee82, 0x40 },
        { 0x80f000, 0x0f },
        { 0x80f01f, 0x8c },
        { 0x80f020, 0x00 },
@@ -1699,7 +1698,6 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x800104, 0x02 },
        { 0x800105, 0xc8 },
        { 0x800106, 0x00 },
-       { 0x800109, 0x02 },
        { 0x800115, 0x0a },
        { 0x800116, 0x03 },
        { 0x80011a, 0xc6 },
@@ -1725,7 +1723,6 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x80014b, 0x8c },
        { 0x80014d, 0xa8 },
        { 0x80014e, 0xc6 },
-       { 0x80014f, 0x03 },
        { 0x800151, 0x28 },
        { 0x800153, 0xcc },
        { 0x800178, 0x09 },
@@ -1737,9 +1734,10 @@ static const struct reg_val tuner_init_it9135_61[] = {
        { 0x80018d, 0x5f },
        { 0x80018f, 0xfb },
        { 0x800190, 0x5c },
-       { 0x80ed02, 0xff },
-       { 0x80ee42, 0xff },
-       { 0x80ee82, 0xff },
+       { 0x800191, 0x00 },
+       { 0x80ed02, 0x40 },
+       { 0x80ee42, 0x40 },
+       { 0x80ee82, 0x40 },
        { 0x80f000, 0x0f },
        { 0x80f01f, 0x8c },
        { 0x80f020, 0x00 },
index 72fb5838cae0d7aec7f51bd83bebe5b0afe459b6..7975c6608e20feaf6e664d627a8c3a752dd2e4c3 100644 (file)
@@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
                sizeof(state->tuner_i2c_adapter.name));
        state->tuner_i2c_adapter.algo      = &cx24123_tuner_i2c_algo;
        state->tuner_i2c_adapter.algo_data = NULL;
+       state->tuner_i2c_adapter.dev.parent = i2c->dev.parent;
        i2c_set_adapdata(&state->tuner_i2c_adapter, state);
        if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) {
                err("tuner i2c bus could not be initialized\n");
index d4fa213ba74ae40ee20c8d31ebebfcfca28f0bb2..de88b980a8370bea73e380da85eb33fe50610761 100644 (file)
@@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
        v4l2_info(sd, "HDCP keys read: %s%s\n",
                        (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
                        (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
-       if (!is_hdmi(sd)) {
+       if (is_hdmi(sd)) {
                bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
                bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
                bool audio_mute = io_read(sd, 0x65) & 0x40;
index 1eaf975d3612331ce6c74eb2ba1636904c19d154..62acb10630f9b2a1b12c114eddad10995308389e 100644 (file)
@@ -1282,19 +1282,12 @@ static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
 
        mutex_lock(&sensor->power_mutex);
 
-       /*
-        * If the power count is modified from 0 to != 0 or from != 0
-        * to 0, update the power state.
-        */
-       if (!sensor->power_count == !on)
-               goto out;
-
-       if (on) {
+       if (on && !sensor->power_count) {
                /* Power on and perform initialisation. */
                ret = smiapp_power_on(sensor);
                if (ret < 0)
                        goto out;
-       } else {
+       } else if (!on && sensor->power_count == 1) {
                smiapp_power_off(sensor);
        }
 
@@ -2572,7 +2565,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
 
                this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
                this->sd.internal_ops = &smiapp_internal_ops;
-               this->sd.owner = NULL;
+               this->sd.owner = THIS_MODULE;
                v4l2_set_subdevdata(&this->sd, client);
 
                rval = media_entity_init(&this->sd.entity,
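
The rewritten condition makes the refcounted power handling explicit: power the sensor up only on the 0 -> 1 transition of power_count and down only on the 1 -> 0 transition. A reduced sketch of that scheme with stand-in types and helpers (not the driver's own):

#include <stdbool.h>

struct power_state {
        int count;
        bool powered;
};

static int do_power_on(struct power_state *s)
{
        s->powered = true;
        return 0;
}

static void do_power_off(struct power_state *s)
{
        s->powered = false;
}

static int set_power(struct power_state *s, bool on)
{
        int ret = 0;

        if (on && s->count == 0)
                ret = do_power_on(s);
        else if (!on && s->count == 1)
                do_power_off(s);

        if (ret == 0)
                s->count += on ? 1 : -1;
        return ret;
}
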
index 716bdc57fac6fb298a904602675605d21c486d2a..83f5074706f9ad296e1bd315e377f037c87c6965 100644 (file)
@@ -1091,6 +1091,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
                setup.addr = ADDR_UNSET;
                setup.type = cx->options.tuner;
                setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
+               setup.config = NULL;
                if (cx->options.radio > 0)
                        setup.mode_mask |= T_RADIO;
                setup.tuner_callback = (setup.type == TUNER_XC2028) ?
index 998919e97dfe27a60c68c1cdb34a5ed95d34ff1c..7b35e633118d68952e22ddb60e4869055eb5a478 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/videodev2.h>
 #include <linux/kthread.h>
index 6f30d7e535b8e0b6d0ce2ea28e508c6334bae221..3d83c425bccf704a4ff174c4bab6b505d4a9ee3a 100644 (file)
@@ -396,6 +396,7 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
                struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 config)
 {
        struct it913x_state *state = NULL;
+       int ret;
 
        /* allocate memory for the internal state */
        state = kzalloc(sizeof(struct it913x_state), GFP_KERNEL);
@@ -425,6 +426,11 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
        state->tuner_type = config;
        state->firmware_ver = 1;
 
+       /* tuner RF initial */
+       ret = it913x_wr_reg(state, PRO_DMOD, 0xec4c, 0x68);
+       if (ret < 0)
+               goto error;
+
        fe->tuner_priv = state;
        memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops,
                        sizeof(struct dvb_tuner_ops));
index 75ec1c659fdd8542890f9b1134527173dfee9f08..c82beac0e0cbfd1fa9f3225bf044e55eab7de410 100644 (file)
@@ -1575,6 +1575,10 @@ static const struct usb_device_id af9035_id_table[] = {
                &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
        { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
                &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
+       { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
+               &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) },
+       { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
+               &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, af9035_id_table);
index a7e24848f6c8d04e3431d27f20107732bbd12686..9da812b8a7861de75722cf5fcfdbe29e3baf39eb 100644 (file)
@@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = {
        .disconnect = em28xx_usb_disconnect,
        .suspend = em28xx_usb_suspend,
        .resume = em28xx_usb_resume,
+       .reset_resume = em28xx_usb_resume,
        .id_table = em28xx_id_table,
 };
 
index 90dec2955f1ca5e790b18fa421c071c7b49aca00..29abc379551e5d6f79483520a892006dca848ed7 100644 (file)
@@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
        struct em28xx *dev = video_drvdata(file);
        struct em28xx_v4l2 *v4l2 = dev->v4l2;
 
-       if (v4l2->streaming_users > 0)
+       if (vb2_is_busy(&v4l2->vb_vidq))
                return -EBUSY;
 
        vidioc_try_fmt_vid_cap(file, priv, f);
@@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp)
                return -EINVAL;
        }
 
-       em28xx_videodbg("open dev=%s type=%s\n",
-                       video_device_node_name(vdev), v4l2_type_names[fh_type]);
+       em28xx_videodbg("open dev=%s type=%s users=%d\n",
+                       video_device_node_name(vdev), v4l2_type_names[fh_type],
+                       v4l2->users);
 
        if (mutex_lock_interruptible(&dev->lock))
                return -ERESTARTSYS;
@@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp)
                return ret;
        }
 
-       if (v4l2_fh_is_singular_file(filp)) {
-               em28xx_videodbg("first opened filehandle, initializing device\n");
-
+       if (v4l2->users == 0) {
                em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
 
                if (vdev->vfl_type != VFL_TYPE_RADIO)
@@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp)
                 * of some i2c devices
                 */
                em28xx_wake_i2c(dev);
-       } else {
-               em28xx_videodbg("further filehandles are already opened\n");
        }
 
        if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1921,6 +1918,7 @@ static int em28xx_v4l2_open(struct file *filp)
 
        kref_get(&dev->ref);
        kref_get(&v4l2->ref);
+       v4l2->users++;
 
        mutex_unlock(&dev->lock);
 
@@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp)
        struct em28xx_v4l2    *v4l2 = dev->v4l2;
        int              errCode;
 
-       mutex_lock(&dev->lock);
+       em28xx_videodbg("users=%d\n", v4l2->users);
 
-       if (v4l2_fh_is_singular_file(filp)) {
-               em28xx_videodbg("last opened filehandle, shutting down device\n");
+       vb2_fop_release(filp);
+       mutex_lock(&dev->lock);
 
+       if (v4l2->users == 1) {
                /* No sense to try to write to the device */
                if (dev->disconnected)
                        goto exit;
@@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp)
                        em28xx_errdev("cannot change alternate number to "
                                        "0 (error=%i)\n", errCode);
                }
-       } else {
-               em28xx_videodbg("further opened filehandles left\n");
        }
 
 exit:
-       vb2_fop_release(filp);
+       v4l2->users--;
        kref_put(&v4l2->ref, em28xx_free_v4l2);
        mutex_unlock(&dev->lock);
        kref_put(&dev->ref, em28xx_free_device);
index 84ef8efdb148c1dcf81d5be799a2f819bde3d3f5..4360338e7b310a2ab985eca427ef34124ddf15a5 100644 (file)
@@ -524,6 +524,7 @@ struct em28xx_v4l2 {
        int sensor_yres;
        int sensor_xtal;
 
+       int users;              /* user count for exclusive use */
        int streaming_users;    /* number of actively streaming users */
 
        u32 frequency;          /* selected tuner frequency */
index c359006074a8ccafeb3a7c4150873f1c13b0d2b9..25d3ae2188cb29fc2338e0dc976d21db17829447 100644 (file)
@@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
         * to the userspace.
         */
        req->count = allocated_buffers;
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        return 0;
 }
@@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
                memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
                memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
                q->memory = create->memory;
+               q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
        }
 
        num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
  */
 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
 {
-       if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+       if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
                return NULL;
 
        return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
@@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
                return;
 
-       if (!q->start_streaming_called) {
-               if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
-                       state = VB2_BUF_STATE_QUEUED;
-       } else if (WARN_ON(state != VB2_BUF_STATE_DONE &&
-                          state != VB2_BUF_STATE_ERROR)) {
-                       state = VB2_BUF_STATE_ERROR;
-       }
+       if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+                   state != VB2_BUF_STATE_ERROR &&
+                   state != VB2_BUF_STATE_QUEUED))
+               state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        /*
@@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
        q->start_streaming_called = 0;
 
        dprintk(1, "driver refused to start streaming\n");
+       /*
+        * If you see this warning, then the driver isn't cleaning up properly
+        * after a failed start_streaming(). See the start_streaming()
+        * documentation in videobuf2-core.h for more information how buffers
+        * should be returned to vb2 in start_streaming().
+        */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
                unsigned i;
 
@@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
                /* Must be zero now */
                WARN_ON(atomic_read(&q->owned_by_drv_count));
        }
+       /*
+        * If done_list is not empty, then start_streaming() didn't call
+        * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+        * STATE_DONE.
+        */
+       WARN_ON(!list_empty(&q->done_list));
        return ret;
 }
 
@@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
         */
        list_add_tail(&vb->queued_entry, &q->queued_list);
        q->queued_count++;
+       q->waiting_for_buffers = false;
        vb->state = VB2_BUF_STATE_QUEUED;
        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /*
@@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
        if (q->start_streaming_called)
                call_void_qop(q, stop_streaming, q);
 
+       /*
+        * If you see this warning, then the driver isn't cleaning up properly
+        * in stop_streaming(). See the stop_streaming() documentation in
+        * videobuf2-core.h for more information how buffers should be returned
+        * to vb2 in stop_streaming().
+        */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
                for (i = 0; i < q->num_buffers; ++i)
                        if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
@@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
         * their normal dequeued state.
         */
        __vb2_queue_cancel(q);
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        dprintk(3, "successful\n");
        return 0;
@@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
        }
 
        /*
-        * There is nothing to wait for if no buffer has been queued and the
-        * queue isn't streaming, or if the error flag is set.
+        * There is nothing to wait for if the queue isn't streaming, or if the
+        * error flag is set.
+        */
+       if (!vb2_is_streaming(q) || q->error)
+               return res | POLLERR;
+       /*
+        * For compatibility with vb1: if QBUF hasn't been called yet, then
+        * return POLLERR as well. This only affects capture queues, output
+        * queues will always initialize waiting_for_buffers to false.
         */
-       if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error)
+       if (q->waiting_for_buffers)
                return res | POLLERR;
 
        /*
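
The videobuf2 hunks above introduce a per-queue waiting_for_buffers flag: REQBUFS/CREATE_BUFS arm it for capture queues, the first QBUF clears it, STREAMOFF re-arms it, and vb2_poll() now reports POLLERR while it is set, restoring vb1-compatible behaviour. The sketch below models only that flag logic in plain C; it is not the kernel implementation, and the struct and helper names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct queue {
        bool is_output;                 /* output vs. capture queue */
        bool streaming;
        bool error;
        bool waiting_for_buffers;       /* capture: no QBUF since REQBUFS */
};

static void reqbufs(struct queue *q)
{
        q->waiting_for_buffers = !q->is_output;
}

static void qbuf(struct queue *q)
{
        q->waiting_for_buffers = false;
}

static void streamoff(struct queue *q)
{
        q->streaming = false;
        q->waiting_for_buffers = !q->is_output;
}

/* True when poll() would report POLLERR under the patched vb2_poll() rules. */
static bool poll_reports_error(const struct queue *q)
{
        if (!q->streaming || q->error)
                return true;
        return q->waiting_for_buffers;  /* vb1 compatibility, capture only */
}

int main(void)
{
        struct queue q = { .is_output = false, .streaming = true };

        reqbufs(&q);
        printf("after REQBUFS:   POLLERR=%d\n", poll_reports_error(&q)); /* 1 */
        qbuf(&q);
        printf("after QBUF:      POLLERR=%d\n", poll_reports_error(&q)); /* 0 */
        streamoff(&q);
        printf("after STREAMOFF: POLLERR=%d\n", poll_reports_error(&q)); /* 1 */
        return 0;
}
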
index adefc31bb85302ab93648081f95951a47753409c..9b163a440f89881e354e76748907a39b1783dc6f 100644 (file)
@@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
                goto fail_pages_alloc;
 
        ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
-                       buf->num_pages, 0, size, gfp_flags);
+                       buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;
 
index a34a11d2fef2dff5d8b76d062ae1d3d401d29053..63ca9841db10d89eecb19af95b9943e56bc07ec2 100644 (file)
@@ -29,7 +29,7 @@ config FUSION_SPI
 config FUSION_FC
        tristate "Fusion MPT ScsiHost drivers for FC"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        ---help---
          SCSI HOST support for a Fiber Channel host adapters.
 
index 7ffdb589841ed2fe2e25ec2580218e4dfd07a2a2..7e1efd5f58f0b04ed8b303d142628755f56a72d7 100644 (file)
@@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context)
        u32 jedec_id;
        u32 status;
 
+       if (fw == NULL) {
+               dev_err(&spi->dev, "Cannot load firmware, aborting\n");
+               return;
+       }
+
        if (fw->size == 0) {
                dev_err(&spi->dev, "Error: Firmware size is 0!\n");
                return;
index f0f5eab0fab102bbf3b4417632abcc5bf6082ea8..798ae69fb63c2ee22973e05bb276cd539e47f24a 100644 (file)
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
                                "the same MAC; 0 for none (default), "
                                "1 for active, 2 for follow");
 module_param(all_slaves_active, int, 0);
-MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
                                     "by setting active flag for all slaves; "
                                     "0 for never (default), 1 for always.");
 module_param(resend_igmp, int, 0);
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
                else
                        bond_xmit_slave_id(bond, skb, 0);
        } else {
-               slave_id = bond_rr_gen_slave_id(bond);
-               bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
+               int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+
+               if (likely(slave_cnt)) {
+                       slave_id = bond_rr_gen_slave_id(bond);
+                       bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+               } else {
+                       dev_kfree_skb_any(skb);
+               }
        }
 
        return NETDEV_TX_OK;
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
 
-       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
+       if (likely(slave_cnt))
+               bond_xmit_slave_id(bond, skb,
+                                  bond_xmit_hash(bond, skb) % slave_cnt);
+       else
+               dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
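
The bonding hunks above read bond->slave_cnt once through ACCESS_ONCE() into a local, test that snapshot for zero, and reuse the same snapshot as the modulo divisor, so a concurrent slave removal can neither change the divisor between check and use nor make it zero. A compact C11 sketch of that snapshot-then-divide pattern; the hash input and return convention are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int slave_cnt;  /* updated concurrently elsewhere */

/* Pick an output slave the way the patched bond_xmit_xor() does: read the
 * count once and only divide by that snapshot. */
static int pick_slave(unsigned int hash)
{
        unsigned int cnt = atomic_load_explicit(&slave_cnt,
                                                memory_order_relaxed);

        if (cnt == 0)
                return -1;              /* no slaves: caller drops the packet */
        return hash % cnt;              /* cnt cannot change under us */
}

int main(void)
{
        atomic_store(&slave_cnt, 3);
        printf("hash 10 -> slave %d\n", pick_slave(10));
        atomic_store(&slave_cnt, 0);
        printf("hash 10 -> slave %d (dropped)\n", pick_slave(10));
        return 0;
}
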
index f07fa89b5fd5b294a5a234e949985080a0617a1d..05e1aa090adda6d98ed8e05dc5e14723556935bb 100644 (file)
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev)
        struct at91_priv *priv = netdev_priv(dev);
        int err;
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
 
        /* check or determine and set bittime */
        err = open_candev(dev);
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev)
  out_close:
        close_candev(dev);
  out:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return err;
 }
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev)
        at91_chip_stop(dev, CAN_STATE_STOPPED);
 
        free_irq(dev->irq, dev);
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        close_candev(dev);
 
index 109cb44291f51f3af4c45979aa3b3ef53099d061..fb279d6ae484ffe3c66f45ab6a8cbb339ef0808f 100644 (file)
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
        ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
        writel(ctrl, priv->raminit_ctrlreg);
        ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-       c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+       c_can_hw_raminit_wait_ti(priv, mask, ctrl);
 
        if (enable) {
                /* Set start bit and wait for the done bit. */
                ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
                writel(ctrl, priv->raminit_ctrlreg);
                ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-               c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+               c_can_hw_raminit_wait_ti(priv, mask, ctrl);
        }
        spin_unlock(&raminit_lock);
 }
index 944aa5d3af6ef4602329b75a57e8f87be85d79f8..6586309329e6130b9663bb3d3b6287e6d09f38c8 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x7f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
         FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
-#define FLEXCAN_TX_BUF_ID              8
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_BUF_RESERVED                8
+#define FLEXCAN_TX_BUF_ID              9
 #define FLEXCAN_IFLAG_BUF(x)           BIT(x)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 
 /* FLEXCAN message buffers */
 #define FLEXCAN_MB_CNT_CODE(x)         (((x) & 0xf) << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE    (0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY       (0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL                (0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRRUN    (0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER     (0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE    (0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT       (0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA                (0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER     (0xe << 24)
+
 #define FLEXCAN_MB_CNT_SRR             BIT(22)
 #define FLEXCAN_MB_CNT_IDE             BIT(21)
 #define FLEXCAN_MB_CNT_RTR             BIT(20)
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
                return -ETIMEDOUT;
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
                return -ETIMEDOUT;
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(100, 200);
+               udelay(100);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
                return -ETIMEDOUT;
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
                return -ETIMEDOUT;
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 
        flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
                return -ETIMEDOUT;
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
        flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
        flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
+       /* Errata ERR005829 step8:
+        * Write twice INACTIVE(0x8) code to first MB.
+        */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
        return NETDEV_TX_OK;
 }
 
@@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                stats->tx_bytes += can_get_echo_skb(dev, 0);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
+               /* after sending a RTR frame mailbox is in RX mode */
+               flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                             &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
                flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
                netif_wake_queue(dev);
        }
@@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev)
        struct flexcan_regs __iomem *regs = priv->base;
        int err;
        u32 reg_mcr, reg_ctrl;
+       int i;
 
        /* enable module */
        err = flexcan_chip_enable(priv);
@@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
-       /* Abort any pending TX, mark Mailbox as INACTIVE */
-       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+       /* clear and invalidate all mailboxes first */
+       for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
+               flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+                             &regs->cantxfg[i].can_ctrl);
+       }
+
+       /* Errata ERR005829: mark first TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+       /* mark TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
                      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
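
The flexcan changes above encode the mailbox state in bits 27:24 of the control word and, following errata ERR005829, reserve the first valid mailbox and write the TX_INACTIVE code to it twice after every queued frame. The sketch below reuses the code macros from the hunk against a plain array that merely stands in for the MMIO mailbox registers (a real driver writes them with flexcan_write()); the mailbox count is an assumption.

#include <stdint.h>
#include <stdio.h>

/* Macros copied from the flexcan hunk above. */
#define FLEXCAN_MB_CODE_RX_INACTIVE     (0x0 << 24)
#define FLEXCAN_MB_CODE_TX_INACTIVE     (0x8 << 24)
#define FLEXCAN_TX_BUF_RESERVED         8
#define FLEXCAN_TX_BUF_ID               9
#define FLEXCAN_MB_NUM                  64      /* assumed mailbox count */

/* Plain array standing in for the MMIO mailbox control words. */
static uint32_t can_ctrl[FLEXCAN_MB_NUM];

int main(void)
{
        int i;

        /* chip start: clear and invalidate all mailboxes first */
        for (i = FLEXCAN_TX_BUF_ID; i < FLEXCAN_MB_NUM; i++)
                can_ctrl[i] = FLEXCAN_MB_CODE_RX_INACTIVE;

        /* errata step 7: park the reserved and TX mailboxes as INACTIVE */
        can_ctrl[FLEXCAN_TX_BUF_RESERVED] = FLEXCAN_MB_CODE_TX_INACTIVE;
        can_ctrl[FLEXCAN_TX_BUF_ID] = FLEXCAN_MB_CODE_TX_INACTIVE;

        /* errata step 8, after queuing a frame: write INACTIVE twice to
         * the reserved first mailbox */
        can_ctrl[FLEXCAN_TX_BUF_RESERVED] = FLEXCAN_MB_CODE_TX_INACTIVE;
        can_ctrl[FLEXCAN_TX_BUF_RESERVED] = FLEXCAN_MB_CODE_TX_INACTIVE;

        printf("reserved MB%d ctrl = 0x%08x\n", FLEXCAN_TX_BUF_RESERVED,
               (unsigned)can_ctrl[FLEXCAN_TX_BUF_RESERVED]);
        printf("tx MB%d ctrl       = 0x%08x\n", FLEXCAN_TX_BUF_ID,
               (unsigned)can_ctrl[FLEXCAN_TX_BUF_ID]);
        return 0;
}
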
index 7a85590fefb9af4867cfe77cacbcd95726ecc52b..e5fac368068a2320eb06d207934c4b969356237b 100644 (file)
@@ -70,6 +70,8 @@ struct peak_pci_chan {
 #define PEAK_PC_104P_DEVICE_ID 0x0006  /* PCAN-PC/104+ cards */
 #define PEAK_PCI_104E_DEVICE_ID        0x0007  /* PCAN-PCI/104 Express cards */
 #define PEAK_MPCIE_DEVICE_ID   0x0008  /* The miniPCIe slot cards */
+#define PEAK_PCIE_OEM_ID       0x0009  /* PCAN-PCI Express OEM */
+#define PEAK_PCIEC34_DEVICE_ID 0x000A  /* PCAN-PCI Express 34 (one channel) */
 
 #define PEAK_PCI_CHAN_MAX      4
 
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
        {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+       {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #endif
        {0,}
 };
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 * This must be done *before* register_sja1000dev() but
                 * *after* devices linkage
                 */
-               if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
+               if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
+                   pdev->device == PEAK_PCIEC34_DEVICE_ID) {
                        err = peak_pciec_probe(pdev, dev);
                        if (err) {
                                dev_err(&pdev->dev,
index 059c7414e30318b3ccaab436a248d63319823369..8ca49f04acecb22ff61b15437c2ecf4def4c190a 100644 (file)
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int entry = vp->cur_tx % TX_RING_SIZE;
        struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
        unsigned long flags;
+       dma_addr_t dma_addr;
 
        if (vortex_debug > 6) {
                pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
        if (!skb_shinfo(skb)->nr_frags) {
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb->len, PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+                                         PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
        } else {
                int i;
 
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb_headlen(skb), PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+                                         skb_headlen(skb), PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+                       dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+                                                   0,
+                                                   frag->size,
+                                                   DMA_TO_DEVICE);
+                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+                               for(i = i-1; i >= 0; i--)
+                                       dma_unmap_page(&VORTEX_PCI(vp)->dev,
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+                                                      DMA_TO_DEVICE);
+
+                               pci_unmap_single(VORTEX_PCI(vp),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+                                                PCI_DMA_TODEVICE);
+
+                               goto out_dma_err;
+                       }
+
                        vp->tx_ring[entry].frag[i+1].addr =
-                                       cpu_to_le32(pci_map_single(
-                                               VORTEX_PCI(vp),
-                                               (void *)skb_frag_address(frag),
-                                               skb_frag_size(frag), PCI_DMA_TODEVICE));
+                                               cpu_to_le32(dma_addr);
 
                        if (i == skb_shinfo(skb)->nr_frags-1)
                                        vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 #else
-       vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+       dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               goto out_dma_err;
+       vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
        vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
        iowrite16(DownUnstall, ioaddr + EL3_CMD);
        spin_unlock_irqrestore(&vp->lock, flags);
+out:
        return NETDEV_TX_OK;
+out_dma_err:
+       dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+       goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
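
The 3c59x change above checks every DMA mapping with dma_mapping_error() and, when a fragment mapping fails, unmaps whatever was already mapped before giving up on the packet. The sketch below shows only that unwind shape; map_fragment()/unmap_fragment() are invented stubs (the third mapping is forced to fail so the error path runs), not the PCI DMA API.

#include <stdbool.h>
#include <stdio.h>

static int mapped[8];

static bool map_fragment(int i)
{
        if (i == 2)
                return false;           /* simulated mapping failure */
        mapped[i] = 1;
        return true;
}

static void unmap_fragment(int i)
{
        mapped[i] = 0;
        printf("unmapped fragment %d\n", i);
}

static int start_xmit(int nr_frags)
{
        int i;

        for (i = 0; i < nr_frags; i++) {
                if (!map_fragment(i))
                        goto out_dma_err;
        }
        return 0;                       /* NETDEV_TX_OK in the driver */

out_dma_err:
        /* Unwind every mapping made so far, as the patched
         * boomerang_start_xmit() does before bailing out. */
        while (--i >= 0)
                unmap_fragment(i);
        printf("Error mapping dma buffer\n");
        return 0;       /* as in the driver: report TX_OK so the stack
                         * does not retry the packet */
}

int main(void)
{
        return start_xmit(4);
}
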
index 23578dfee249879064c1b86526ee80aead68b79f..3005155e412b2e0e1ab1d2c969e4895a2723644c 100644 (file)
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }
 
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+       wmb(); /* BDs must been written to memory before enabling TX */
+       GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
 static inline void greth_disable_tx(struct greth_private *greth)
 {
        GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
        return err;
 }
 
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+       if (tx_next < tx_last)
+               return (tx_last - tx_next) - 1;
+       else
+               return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
 
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
-       u32 status = 0, dma_addr, ctrl;
+       u32 status, dma_addr;
        int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
        unsigned long flags;
+       u16 tx_last;
 
        nr_frags = skb_shinfo(skb)->nr_frags;
+       tx_last = greth->tx_last;
+       rmb(); /* tx_last is updated by the poll task */
 
-       /* Clean TX Ring */
-       greth_clean_tx_gbit(dev);
-
-       if (greth->tx_free < nr_frags + 1) {
-               spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-               ctrl = GRETH_REGLOAD(greth->regs->control);
-               /* Enable TX IRQ only if not already in poll() routine */
-               if (ctrl & GRETH_RXI)
-                       GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+       if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
                netif_stop_queue(dev);
-               spin_unlock_irqrestore(&greth->devlock, flags);
                err = NETDEV_TX_BUSY;
                goto out;
        }
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
        /* Linear buf */
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;
+       else
+               status = GRETH_BD_IE;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
        /* Enable the descriptor chain by enabling the first descriptor */
        bdp = greth->tx_bd_base + greth->tx_next;
-       greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-       greth->tx_next = curr_tx;
-       greth->tx_free -= nr_frags + 1;
-
-       wmb();
+       greth_write_bd(&bdp->stat,
+                      greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 
        spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-       greth_enable_tx(greth);
+       greth->tx_next = curr_tx;
+       greth_enable_tx_and_irq(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);
 
        return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
        if (greth->tx_free > 0) {
                netif_wake_queue(dev);
        }
-
 }
 
 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 {
        struct greth_private *greth;
        struct greth_bd *bdp, *bdp_last_frag;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        u32 stat;
        int nr_frags, i;
+       u16 tx_last;
 
        greth = netdev_priv(dev);
+       tx_last = greth->tx_last;
 
-       while (greth->tx_free < GRETH_TXBD_NUM) {
+       while (tx_last != greth->tx_next) {
 
-               skb = greth->tx_skbuff[greth->tx_last];
+               skb = greth->tx_skbuff[tx_last];
 
                nr_frags = skb_shinfo(skb)->nr_frags;
 
                /* We only clean fully completed SKBs */
-               bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+               bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
 
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
                if (stat & GRETH_BD_EN)
                        break;
 
-               greth->tx_skbuff[greth->tx_last] = NULL;
+               greth->tx_skbuff[tx_last] = NULL;
 
                greth_update_tx_stats(dev, stat);
                dev->stats.tx_bytes += skb->len;
 
-               bdp = greth->tx_bd_base + greth->tx_last;
+               bdp = greth->tx_bd_base + tx_last;
 
-               greth->tx_last = NEXT_TX(greth->tx_last);
+               tx_last = NEXT_TX(tx_last);
 
                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
                for (i = 0; i < nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                       bdp = greth->tx_bd_base + greth->tx_last;
+                       bdp = greth->tx_bd_base + tx_last;
 
                        dma_unmap_page(greth->dev,
                                       greth_read_bd(&bdp->addr),
                                       skb_frag_size(frag),
                                       DMA_TO_DEVICE);
 
-                       greth->tx_last = NEXT_TX(greth->tx_last);
+                       tx_last = NEXT_TX(tx_last);
                }
-               greth->tx_free += nr_frags+1;
                dev_kfree_skb(skb);
        }
+       if (skb) { /* skb is set only if the above while loop was entered */
+               wmb();
+               greth->tx_last = tx_last;
 
-       if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-               netif_wake_queue(dev);
+               if (netif_queue_stopped(dev) &&
+                   (greth_num_free_bds(tx_last, greth->tx_next) >
+                   (MAX_SKB_FRAGS+1)))
+                       netif_wake_queue(dev);
+       }
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
        greth = container_of(napi, struct greth_private, napi);
 
 restart_txrx_poll:
-       if (netif_queue_stopped(greth->netdev)) {
-               if (greth->gbit_mac)
-                       greth_clean_tx_gbit(greth->netdev);
-               else
-                       greth_clean_tx(greth->netdev);
-       }
-
        if (greth->gbit_mac) {
+               greth_clean_tx_gbit(greth->netdev);
                work_done += greth_rx_gbit(greth->netdev, budget - work_done);
        } else {
+               if (netif_queue_stopped(greth->netdev))
+                       greth_clean_tx(greth->netdev);
                work_done += greth_rx(greth->netdev, budget - work_done);
        }
 
@@ -983,7 +992,8 @@ restart_txrx_poll:
                spin_lock_irqsave(&greth->devlock, flags);
 
                ctrl = GRETH_REGLOAD(greth->regs->control);
-               if (netif_queue_stopped(greth->netdev)) {
+               if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+                   (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
                        GRETH_REGSAVE(greth->regs->control,
                                        ctrl | GRETH_TXI | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE |
index 232a622a85b7006ce8cb235f1d9790ea7b2c4929..ae16ac94daf825a5b800356603cb1c1213636d34 100644 (file)
@@ -107,7 +107,7 @@ struct greth_private {
 
        u16 tx_next;
        u16 tx_last;
-       u16 tx_free;
+       u16 tx_free; /* only used on 10/100Mbit */
        u16 rx_cur;
 
        struct greth_regs *regs;        /* Address of controller registers. */
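
The greth hunks above stop using the shared tx_free counter on the gigabit path and instead derive the number of free descriptors from the producer (tx_next) and consumer (tx_last) indices, with explicit barriers ordering the index updates between xmit and the poll task. The free-slot arithmetic is reproduced below as a standalone function; GRETH_TXBD_NUM is given an assumed value of 128 here.

#include <stdint.h>
#include <stdio.h>

#define GRETH_TXBD_NUM 128      /* assumed ring size for the example */

/* Same arithmetic as greth_num_free_bds(): free descriptors between the
 * consumer (tx_last) and producer (tx_next), keeping one slot unused so a
 * full ring and an empty ring stay distinguishable. */
static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
{
        if (tx_next < tx_last)
                return (tx_last - tx_next) - 1;
        return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

int main(void)
{
        printf("%u\n", num_free_bds(0, 0));     /* empty ring: 127 free */
        printf("%u\n", num_free_bds(10, 9));    /* almost full: 0 free */
        printf("%u\n", num_free_bds(120, 5));   /* producer wrapped: 114 free */
        return 0;
}
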
index 346592dca33ce98dd94e24f33f99eabb373f60cb..a3c11355a34dd199a40252dab901ab23cb908ffb 100644 (file)
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
        struct xgbe_prv_data *pdata = filp->private_data;
        unsigned int value;
 
-       value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                          pdata->debugfs_xpcs_reg);
+       value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+                          pdata->debugfs_xpcs_reg);
 
        return xgbe_common_read(buffer, count, ppos, value);
 }
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
        if (len < 0)
                return len;
 
-       pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                   pdata->debugfs_xpcs_reg, value);
+       XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+                   value);
 
        return len;
 }
index edaca4496264862063835a444594471df29658a2..ea273836d999c854d89d45789e33d1ff81afc92b 100644 (file)
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 
        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 
        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
        /* Enable all counter interrupts */
-       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
-       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
 }
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
        unsigned int i, count;
 
+       if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+               return 0;
+
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
 }
 
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
-                                                 unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+                                                 unsigned int queue_count)
 {
        unsigned int q_fifo_size = 0;
        enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
                q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }
+
+       /* The configured value is not the actual amount of fifo RAM */
+       q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
        q_fifo_size = q_fifo_size / queue_count;
 
        /* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
                xgbe_disable_rx_vlan_stripping(pdata);
 }
 
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+       bool read_hi;
+       u64 val;
+
+       switch (reg_lo) {
+       /* These registers are always 64 bit */
+       case MMC_TXOCTETCOUNT_GB_LO:
+       case MMC_TXOCTETCOUNT_G_LO:
+       case MMC_RXOCTETCOUNT_GB_LO:
+       case MMC_RXOCTETCOUNT_G_LO:
+               read_hi = true;
+               break;
+
+       default:
+               read_hi = false;
+       };
+
+       val = XGMAC_IOREAD(pdata, reg_lo);
+
+       if (read_hi)
+               val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+       return val;
+}
+
 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 {
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
                stats->txoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
                stats->txframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
                stats->txmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
                stats->tx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
                stats->tx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
                stats->tx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
                stats->tx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
                stats->tx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
                stats->tx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
                stats->txunicastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
                stats->txmulticastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
                stats->txunderflowerror +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
                stats->txoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
                stats->txframecount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
                stats->txpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
                stats->txvlanframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 }
 
 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
                stats->rxframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
                stats->rxoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
                stats->rxoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
                stats->rxbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
                stats->rxmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
                stats->rxcrcerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
                stats->rxrunterror +=
-                       XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+                       xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
                stats->rxjabbererror +=
-                       XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+                       xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
                stats->rxundersize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
                stats->rxoversize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
                stats->rx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
                stats->rx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
                stats->rx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
                stats->rx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
                stats->rx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
                stats->rx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
                stats->rxunicastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
                stats->rxlengtherror +=
-                       XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
                stats->rxoutofrangetype +=
-                       XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
                stats->rxpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
                stats->rxfifooverflow +=
-                       XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
                stats->rxvlanframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
                stats->rxwatchdogerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+                       xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 }
 
 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
 
        stats->txoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
        stats->txframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
        stats->txmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
        stats->tx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
        stats->tx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
        stats->tx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
        stats->tx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
        stats->tx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
        stats->tx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
        stats->txunicastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
        stats->txmulticastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
        stats->txunderflowerror +=
-               XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
        stats->txoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
        stats->txframecount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
        stats->txpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
        stats->txvlanframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 
        stats->rxframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
        stats->rxoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
        stats->rxoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
        stats->rxbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
        stats->rxmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
        stats->rxcrcerror +=
-               XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
        stats->rxrunterror +=
-               XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+               xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
        stats->rxjabbererror +=
-               XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+               xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
        stats->rxundersize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
        stats->rxoversize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
        stats->rx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
        stats->rx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
        stats->rx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
        stats->rx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
        stats->rx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
        stats->rx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
        stats->rxunicastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
        stats->rxlengtherror +=
-               XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
        stats->rxoutofrangetype +=
-               XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+               xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
        stats->rxpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
        stats->rxfifooverflow +=
-               XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+               xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
        stats->rxvlanframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
        stats->rxwatchdogerror +=
-               XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+               xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 
        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
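
The xgbe hunks above route all MMC statistics reads through xgbe_mmc_read(), which reads the low 32-bit register and, for the four octet counters that are always 64 bits wide, also reads the adjacent high register and folds it in at bit 32. Below is a standalone sketch of that lo/hi combination; the register array and offsets are simulated stand-ins for XGMAC_IOREAD() and the real MMC register map.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];       /* simulated 32-bit register file */

static uint32_t ioread32(unsigned int off)
{
        return regs[off / 4];
}

/* Combine a lo/hi counter pair the way xgbe_mmc_read() does for the
 * always-64-bit octet counters. */
static uint64_t mmc_read64(unsigned int reg_lo)
{
        uint64_t val = ioread32(reg_lo);

        val |= (uint64_t)ioread32(reg_lo + 4) << 32;
        return val;
}

int main(void)
{
        regs[2] = 0x89abcdef;           /* low 32 bits at offset 8 */
        regs[3] = 0x01234567;           /* high 32 bits at offset 12 */
        printf("counter = 0x%016llx\n",
               (unsigned long long)mmc_read64(8));
        return 0;
}
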
index dc84f7193c2db62aaf5d922f9beb409b13a975c3..b26d75856553bf62c975e533dc815051f7968de5 100644 (file)
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
        memset(hw_feat, 0, sizeof(*hw_feat));
 
+       hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
index a076aca138a12ce4cdf69fb872d1d61c4202fbfa..46f613028e9c00ba9b659a4b8ccc7a8f5f47c644 100644 (file)
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
                             struct ethtool_drvinfo *drvinfo)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
 
        strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
                sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
        drvinfo->n_stats = XGBE_STATS_COUNT;
 }
 
index 8aa6a9353f7bc5b5b3cf7592bc42186a93ca0bce..bdf9cfa70e88c4dac3c7f3c439fa2b3eef29aaa6 100644 (file)
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
                }
 
                if (i < pdata->rx_ring_count) {
-                       spin_lock_init(&tx_ring->lock);
+                       spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }
 
index 07bf70a82908ead8e75b6e4704946ae10c961b37..e9fe6e6ddcc34acd7469aef6b523855e535f5a99 100644 (file)
 #define XGMAC_DRIVER_CONTEXT   1
 #define XGMAC_IOCTL_CONTEXT    2
 
+#define XGBE_FIFO_MAX          81920
 #define XGBE_FIFO_SIZE_B(x)    (x)
 #define XGBE_FIFO_SIZE_KB(x)   (x * 1024)
 
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
  * or configurations are present in the device.
  */
 struct xgbe_hw_features {
+       /* HW Version */
+       unsigned int version;
+
        /* HW Feature Register0 */
        unsigned int gmii;              /* 1000 Mbps support */
        unsigned int vlhash;            /* VLAN Hash Filter */
index 616dff6d3f5f3cc2837cdd9de776e943283a24fc..f4054d242f3c7ac6e52437c5cb795cac8e3da2dd 100644 (file)
@@ -1,5 +1,6 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
+       depends on HAS_DMA
        select PHYLIB
        help
          This is the Ethernet driver for the on-chip ethernet interface on the
index fe5cfeace6e3e1cd5bdcbce922b5b51e0f7cc326..5919394d9f585f4c635b27dc985919b25d250646 100644 (file)
 #define DRV_NAME       "arc_emac"
 #define DRV_VERSION    "1.0"
 
+/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in tx the ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+       return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:      Pointer to the net_device structure.
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
                txbd->info = 0;
 
                *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-               if (netif_queue_stopped(ndev))
-                       netif_wake_queue(ndev);
        }
+
+       /* Ensure that txbd_dirty is visible to tx() before checking
+        * for queue stopped.
+        */
+       smp_mb();
+
+       if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+               netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
                napi_complete(napi);
-               arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+               arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
        return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
        /* Reset all flags except "MDIO complete" */
        arc_reg_set(priv, R_STATUS, status);
 
-       if (status & RXINT_MASK) {
+       if (status & (RXINT_MASK | TXINT_MASK)) {
                if (likely(napi_schedule_prep(&priv->napi))) {
-                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
                        __napi_schedule(&priv->napi);
                }
        }
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
        arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
        /* Enable interrupts */
-       arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Set CONTROL */
        arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
        netif_stop_queue(ndev);
 
        /* Disable interrupts */
-       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Disable EMAC */
        arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-       /* EMAC still holds this buffer in its possession.
-        * CPU must not modify this buffer descriptor
-        */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+       if (unlikely(!arc_emac_tx_avail(priv))) {
                netif_stop_queue(ndev);
+               netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        /* Increment index to point to the next BD */
        *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-       /* Get "info" of the next BD */
-       info = &priv->txbd[*txbd_curr].info;
+       /* Ensure that tx_clean() sees the new txbd_curr before
+        * checking the queue status. This prevents an unneeded wake
+        * of the queue in tx_clean().
+        */
+       smp_mb();
 
-       /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+       if (!arc_emac_tx_avail(priv)) {
                netif_stop_queue(ndev);
+               /* Refresh txbd_dirty */
+               smp_mb();
+               if (arc_emac_tx_avail(priv))
+                       netif_start_queue(ndev);
+       }
 
        arc_reg_set(priv, R_STATUS, TXPL_MASK);
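
The arc_emac hunks above replace the per-descriptor OWN-bit test with index arithmetic (arc_emac_tx_avail()) and pair two smp_mb() calls so that, as the added comments explain, tx() publishes txbd_curr before re-checking the queue and tx_clean() publishes txbd_dirty before testing netif_queue_stopped(). Below is a minimal standalone sketch of that free-slot arithmetic only; the TX_BD_NUM value is assumed purely for illustration, and one slot is always left unused so that curr == dirty unambiguously means the ring is empty.

#include <stdio.h>

#define TX_BD_NUM 128   /* ring size assumed for illustration */

static int tx_avail(unsigned int dirty, unsigned int curr)
{
        return (dirty + TX_BD_NUM - curr - 1) % TX_BD_NUM;
}

int main(void)
{
        printf("empty ring : %d free\n", tx_avail(0, 0));     /* 127 */
        printf("full ring  : %d free\n", tx_avail(0, 127));   /*   0 */
        printf("wrapped    : %d free\n", tx_avail(120, 5));   /* 114 */
        return 0;
}
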
 
index 7dcfb19a31c888894121aa03c83bc8fe439bc4cf..d8d07a818b89bc694a77b2bdc3172f5bbb6d7010 100644 (file)
@@ -84,7 +84,7 @@ config BNX2
 
 config CNIC
        tristate "QLogic CNIC support"
-       depends on PCI
+       depends on PCI && (IPV6 || IPV6=n)
        select BNX2
        select UIO
        ---help---
index 4a7028d6591287a4e0bd3a03ee339528d05e1f98..d588136b23b93c6dad513384a3a0ef3e4a436a2d 100644 (file)
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
                                     hwstat->tx_underruns +
                                     hwstat->tx_excessive_cols +
                                     hwstat->tx_late_cols);
-               nstat->multicast  = hwstat->tx_multicast_pkts;
+               nstat->multicast  = hwstat->rx_multicast_pkts;
                nstat->collisions = hwstat->tx_total_cols;
 
                nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
index 6f4e18644bd4e5089c7485acd7417a0adff76bff..d9b9170ed2fc329d5d5c8522fdc3803ec90cb49b 100644 (file)
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
+
+               processed++;
+               priv->rx_read_ptr++;
+
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
+
+               /* We do not have a backing SKB, so we do not have a corresponding
+                * DMA mapping for this incoming packet since
+                * bcm_sysport_rx_refill always either has both skb and mapping
+                * or none.
+                */
+               if (unlikely(!skb)) {
+                       netif_err(priv, rx_err, ndev, "out of memory!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       goto refill;
+               }
+
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
                          DESC_STATUS_MASK;
 
-               processed++;
-               priv->rx_read_ptr++;
-               if (priv->rx_read_ptr == priv->num_rx_bds)
-                       priv->rx_read_ptr = 0;
-
                netif_dbg(priv, rx_status, ndev,
                          "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);
 
-               if (unlikely(!skb)) {
-                       netif_err(priv, rx_err, ndev, "out of memory!\n");
-                       ndev->stats.rx_dropped++;
-                       ndev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
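
As the added comment notes, bcm_sysport_rx_refill() installs an skb and its DMA mapping together or not at all, which is why the NULL-skb check above now runs before dma_unmap_single(). A hedged, illustrative-only restatement of that invariant (structure and helper name are made up):

#include <linux/skbuff.h>
#include <linux/types.h>

struct example_rx_cb {
        struct sk_buff *skb;    /* NULL when the previous refill failed */
        dma_addr_t dma_addr;    /* valid only while skb is non-NULL */
};

static inline bool example_rx_cb_mapped(const struct example_rx_cb *cb)
{
        return cb->skb != NULL; /* skb and DMA mapping are always paired */
}
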
index 2fee73b878c2e90542a58b86c0375f886f3017d6..823d01c5684caf3373d51ee09fe615a0e8e7a07b 100644 (file)
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                skb->protocol = eth_type_trans(skb, bp->dev);
 
-               if ((len > (bp->dev->mtu + ETH_HLEN)) &&
-                       (ntohs(skb->protocol) != 0x8100)) {
+               if (len > (bp->dev->mtu + ETH_HLEN) &&
+                   skb->protocol != htons(0x8100) &&
+                   skb->protocol != htons(ETH_P_8021AD)) {
 
                        dev_kfree_skb(skb);
                        goto next_rx;
index 5ba8af50c84f2bb3ebf9300c9ad8765d25a06a8d..c4daa068f1db5e37bdea6f160dde26b7c13d1659 100644 (file)
@@ -2233,7 +2233,12 @@ struct shmem2_region {
        u32 reserved3;                          /* Offset 0x14C */
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
-       #define LINK_ATTR_SYNC_KR2_ENABLE       (1<<0)
+       #define LINK_ATTR_SYNC_KR2_ENABLE       0x00000001
+       #define LINK_SFP_EEPROM_COMP_CODE_MASK  0x0000ff00
+       #define LINK_SFP_EEPROM_COMP_CODE_SHIFT          8
+       #define LINK_SFP_EEPROM_COMP_CODE_SR    0x00001000
+       #define LINK_SFP_EEPROM_COMP_CODE_LR    0x00002000
+       #define LINK_SFP_EEPROM_COMP_CODE_LRM   0x00004000
 
        u32 reserved5[2];
        u32 reserved6[PORT_MAX];
index 53fb4fa61b405aa540239492fef2d5a4f94fa9bd..549549eaf580f79518295bd4a13234cef8520968 100644 (file)
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
                         LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR               0x2
+       #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
        #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
        #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
        #define SFP_EEPROM_CON_TYPE_VAL_RJ45    0x22
 
 
-#define SFP_EEPROM_COMP_CODE_ADDR              0x3
-       #define SFP_EEPROM_COMP_CODE_SR_MASK    (1<<4)
-       #define SFP_EEPROM_COMP_CODE_LR_MASK    (1<<5)
-       #define SFP_EEPROM_COMP_CODE_LRM_MASK   (1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR          0x3
+       #define SFP_EEPROM_10G_COMP_CODE_SR_MASK        (1<<4)
+       #define SFP_EEPROM_10G_COMP_CODE_LR_MASK        (1<<5)
+       #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK       (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR           0x6
+       #define SFP_EEPROM_1G_COMP_CODE_SX      (1<<0)
+       #define SFP_EEPROM_1G_COMP_CODE_LX      (1<<1)
+       #define SFP_EEPROM_1G_COMP_CODE_CX      (1<<2)
+       #define SFP_EEPROM_1G_COMP_CODE_BASE_T  (1<<3)
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR             0x8
        #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
                                 reg_set[i].val);
 
        /* Start KR2 work-around timer which handles BCM8073 link-partner */
-       vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
+       params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
 }
 
 static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
        for (i = 0; i < ARRAY_SIZE(reg_set); i++)
                bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
                                 reg_set[i].val);
-       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
+       params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
 
        vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
 }
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
                                        ~FEATURE_CONFIG_PFC_ENABLED;
 
        if (SHMEM2_HAS(bp, link_attr_sync))
-               vars->link_attr_sync = SHMEM2_RD(bp,
+               params->link_attr_sync = SHMEM2_RD(bp,
                                                 link_attr_sync[params->port]);
 
        DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u32 sync_offset = 0, phy_idx, media_types;
-       u8 gport, val[2], check_limiting_mode = 0;
+       u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
        *edc_mode = EDC_MODE_LIMITING;
        phy->media_type = ETH_PHY_UNSPECIFIED;
        /* First check for copper cable */
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
                                         I2C_DEV_ADDR_A0,
-                                        SFP_EEPROM_CON_TYPE_ADDR,
-                                        2,
+                                        0,
+                                        SFP_EEPROM_FC_TX_TECH_ADDR + 1,
                                         (u8 *)val) != 0) {
                DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
                return -EINVAL;
        }
-
-       switch (val[0]) {
+       params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+       params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+               LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
+       switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
        case SFP_EEPROM_CON_TYPE_VAL_COPPER:
        {
                u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                /* Check if it's an active cable (includes SFP+ module)
                 * or a passive cable
                 */
-               if (bnx2x_read_sfp_module_eeprom(phy,
-                                              params,
-                                              I2C_DEV_ADDR_A0,
-                                              SFP_EEPROM_FC_TX_TECH_ADDR,
-                                              1,
-                                              &copper_module_type) != 0) {
-                       DP(NETIF_MSG_LINK,
-                               "Failed to read copper-cable-type"
-                               " from SFP+ EEPROM\n");
-                       return -EINVAL;
-               }
+               copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
 
                if (copper_module_type &
                    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                }
                break;
        }
+       case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
        case SFP_EEPROM_CON_TYPE_VAL_LC:
        case SFP_EEPROM_CON_TYPE_VAL_RJ45:
                check_limiting_mode = 1;
-               if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
-                              SFP_EEPROM_COMP_CODE_LR_MASK |
-                              SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+               if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+                    (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+                     SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+                     SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
                        DP(NETIF_MSG_LINK, "1G SFP module detected\n");
-                       gport = params->port;
                        phy->media_type = ETH_PHY_SFP_1G_FIBER;
                        if (phy->req_line_speed != SPEED_1000) {
+                               u8 gport = params->port;
                                phy->req_line_speed = SPEED_1000;
                                if (!CHIP_IS_E1x(bp)) {
                                        gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                                           "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
                                           gport);
                        }
+                       if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+                           SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+                               bnx2x_sfp_set_transmitter(params, phy, 0);
+                               msleep(40);
+                               bnx2x_sfp_set_transmitter(params, phy, 1);
+                       }
                } else {
                        int idx, cfg_idx = 0;
                        DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                break;
        default:
                DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
-                        val[0]);
+                        val[SFP_EEPROM_CON_TYPE_ADDR]);
                return -EINVAL;
        }
        sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
        sigdet = bnx2x_warpcore_get_sigdet(phy, params);
        if (!sigdet) {
-               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+               if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                        bnx2x_kr2_recovery(params, vars, phy);
                        DP(NETIF_MSG_LINK, "No sigdet\n");
                }
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
        /* CL73 has not begun yet */
        if (base_page == 0) {
-               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+               if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                        bnx2x_kr2_recovery(params, vars, phy);
                        DP(NETIF_MSG_LINK, "No BP\n");
                }
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                            ((next_page & 0xe0) == 0x20))));
 
        /* In case KR2 is already disabled, check if we need to re-enable it */
-       if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+       if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                if (!not_kr2_device) {
                        DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
                           next_page);
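
The bnx2x_get_edc_mode() changes above read the first SFP_EEPROM_FC_TX_TECH_ADDR + 1 EEPROM bytes in one transaction and then classify the module from the cached bytes. A standalone sketch of the 10G compatibility-code test used there; the EEPROM byte value is invented for illustration, while the masks mirror the definitions added in bnx2x_link.h above.

#include <stdio.h>

#define SFP_EEPROM_10G_COMP_CODE_SR_MASK   (1 << 4)
#define SFP_EEPROM_10G_COMP_CODE_LR_MASK   (1 << 5)
#define SFP_EEPROM_10G_COMP_CODE_LRM_MASK  (1 << 6)

int main(void)
{
        unsigned char comp_code = 0x20;  /* assumed EEPROM byte: LR bit set */

        if (comp_code & (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
                         SFP_EEPROM_10G_COMP_CODE_LR_MASK |
                         SFP_EEPROM_10G_COMP_CODE_LRM_MASK))
                printf("10G optic module detected\n");
        else
                printf("1G SFP module detected\n");
        return 0;
}
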
index 389f5f8cb0a3c3108f2305b62c0843037ea4f72e..d9cce4c3899b7b9d388cf28a6f4bf0feece3c4b2 100644 (file)
@@ -323,6 +323,9 @@ struct link_params {
 #define LINK_FLAGS_INT_DISABLED                (1<<0)
 #define PHY_INITIALIZED                (1<<1)
        u32 lfa_base;
+
+       /* The same definitions as the shmem2 parameter */
+       u32 link_attr_sync;
 };
 
 /* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
        u8 rx_tx_asic_rst;
        u8 turn_to_run_wc_rt;
        u16 rsrv2;
-       /* The same definitions as the shmem2 parameter */
-       u32 link_attr_sync;
 };
 
 /***********************************************************/
index 900cab42081068e699e4bc8e2e0bec71659c604d..d1c093dcb054aebb71b1ee701cf175e8ab804515 100644 (file)
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
        bnx2x_release_phy_lock(bp);
 }
 
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+       /* make sure this value is 0 */
+       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+       bnx2x_config_endianity(bp, 1);
+#else
+       bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+       bnx2x_config_endianity(bp, 0);
+}
+
 /**
  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
  *
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
        bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
-       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-       /* make sure this value is 0 */
-       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/*     REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+       bnx2x_set_endianity(bp);
        bnx2x_ilt_init_page_size(bp, INITOP_SET);
 
        if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        bnx2x_iov_remove_one(bp);
 
        /* Power on: we can't let PCI layer write to us while we are in D3 */
-       if (IS_PF(bp))
+       if (IS_PF(bp)) {
                bnx2x_set_power_state(bp, PCI_D0);
 
+               /* Set endianity registers to reset values in case the next driver
+                * boots in a different endianity environment.
+                */
+               bnx2x_reset_endianity(bp);
+       }
+
        /* Disable MSI/MSI-X */
        bnx2x_disable_msi(bp);
 
index 27861a6c7ca55966048b4cb94b0b92a0dde1d269..a6a9f284c8dd762579e062a9d9d62f03e1ce6f96 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define BCM_VLAN 1
 #endif
 #include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
                             struct dst_entry **dst)
 {
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
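
Both cnic.c hunks above switch to IS_ENABLED() from <linux/kconfig.h>, which evaluates to 1 when the named option is built in or modular and to 0 otherwise, replacing the open-coded defined(CONFIG_FOO)/defined(CONFIG_FOO_MODULE) tests (the accompanying CNIC Kconfig change keeps the built-in/module combinations legal). A minimal sketch of the idiom; CONFIG_EXAMPLE_FEATURE is a made-up option.

#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool example_feature_available(void)
{
        /* 1 for CONFIG_EXAMPLE_FEATURE=y or =m, 0 when the option is off */
        return IS_ENABLED(CONFIG_EXAMPLE_FEATURE);
}
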
index 3f9d4de8173cdd0f86589925cf48a78d0ab7648d..5cc9cae21ed504fa0b7d71475adcc2a3e43a3742 100644 (file)
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
+       unsigned int bds_compl;
        unsigned int c_index;
 
        /* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        /* Reclaim transmitted buffers */
        while (last_tx_cn-- > 0) {
                tx_cb_ptr = ring->cbs + last_c_index;
+               bds_compl = 0;
                if (tx_cb_ptr->skb) {
+                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
                dev->stats.tx_packets++;
-               ring->free_bds += 1;
+               ring->free_bds += bds_compl;
 
                last_c_index++;
                last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+
+               rxpktprocessed++;
+
+               priv->rx_read_ptr++;
+               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+               /* We do not have a backing SKB, so we do not have a
+                * corresponding DMA mapping for this incoming packet since
+                * bcmgenet_rx_refill always either has both skb and mapping or
+                * none.
+                */
+               if (unlikely(!skb)) {
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+                       goto refill;
+               }
+
                /* Unmap the packet contents such that we can use the
                 * RSV from the 64 bytes descriptor when enabled and save
                 * a 32-bits register read
                 */
-               cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
                dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
                                 priv->rx_buf_len, DMA_FROM_DEVICE);
 
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                          __func__, p_index, priv->rx_c_index,
                          priv->rx_read_ptr, dma_length_status);
 
-               rxpktprocessed++;
-
-               priv->rx_read_ptr++;
-               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
-               /* out of memory, just drop packets at the hardware level */
-               if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
-                       dev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+       int ret = 0;
+       int timeout = 0;
+       u32 reg;
+
+       /* Disable TDMA to stop adding more frames to TX DMA */
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+       /* Check TDMA status register to confirm TDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       /* Wait 10ms for packet drain in both tx and rx dma */
+       usleep_range(10000, 20000);
+
+       /* Disable RDMA */
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       timeout = 0;
+       /* Check RDMA status register to confirm RDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
        /* disable DMA */
-       bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
-       bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+       bcmgenet_dma_teardown(priv);
 
        for (i = 0; i < priv->num_tx_bds; i++) {
                if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
        return ret;
 }
 
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
-       int ret = 0;
-       int timeout = 0;
-       u32 reg;
-
-       /* Disable TDMA to stop add more frames in TX DMA */
-       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
-       /* Check TDMA status register to confirm TDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       /* Wait 10ms for packet drain in both tx and rx dma */
-       usleep_range(10000, 20000);
-
-       /* Disable RDMA */
-       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
-       timeout = 0;
-       /* Check RDMA status register to confirm RDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       return ret;
-}
-
 static void bcmgenet_netif_stop(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
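
bcmgenet_dma_teardown() is moved above bcmgenet_fini_dma() so that fini_dma can perform a proper teardown: disable the engine, then poll DMA_STATUS for DMA_DISABLED with a bounded number of iterations, instead of blindly writing 0 to DMA_CTRL as before. A standalone sketch of that bounded-poll pattern; the register access is simulated, and the bit position and iteration budget are assumed for illustration.

#include <stdio.h>

#define DMA_DISABLED_BIT  (1u << 0)     /* assumed status bit */
#define DMA_TIMEOUT_VAL   5000          /* assumed iteration budget */

static unsigned int fake_dma_status;    /* stands in for a status register */

static int wait_for_dma_disabled(void)
{
        int timeout = 0;

        while (timeout++ < DMA_TIMEOUT_VAL) {
                if (fake_dma_status & DMA_DISABLED_BIT)
                        return 0;       /* engine reports it has stopped */
                /* the driver udelay(1)s between reads; omitted here */
        }
        return -1;                      /* treated as -ETIMEDOUT */
}

int main(void)
{
        fake_dma_status = DMA_DISABLED_BIT;     /* pretend HW is already idle */
        printf("wait_for_dma_disabled() = %d\n", wait_for_dma_disabled());
        return 0;
}
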
index 3ac5d23454a8dae9b2e618205d14dc91d822f1fe..ba499489969a03af0e4500c574c41d98bca0deae 100644 (file)
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                skb->protocol = eth_type_trans(skb, tp->dev);
 
                if (len > (tp->dev->mtu + ETH_HLEN) &&
-                   skb->protocol != htons(ETH_P_8021Q)) {
+                   skb->protocol != htons(ETH_P_8021Q) &&
+                   skb->protocol != htons(ETH_P_8021AD)) {
                        dev_kfree_skb_any(skb);
                        goto drop_it_no_recycle;
                }
@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        entry = tnapi->tx_prod;
        base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+               /* HW/FW can not correctly segment packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD))
+                       return tg3_tso_bug(tp, tnapi, txq, skb);
+
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                            tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                base_flags |= tsflags << 12;
                        }
                }
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* HW/FW can not correctly checksum packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (skb_checksum_help(skb))
+                               goto drop;
+               } else  {
+                       base_flags |= TXD_FLAG_TCPUDP_CSUM;
+               }
        }
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
@@ -11617,6 +11634,12 @@ static int tg3_open(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);
        int err;
 
+       if (tp->pcierr_recovery) {
+               netdev_err(dev, "Failed to open device. PCI error recovery "
+                          "in progress\n");
+               return -EAGAIN;
+       }
+
        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11697,12 @@ static int tg3_close(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
+       if (tp->pcierr_recovery) {
+               netdev_err(dev, "Failed to close device. PCI error recovery "
+                          "in progress\n");
+               return -EAGAIN;
+       }
+
        tg3_ptp_fini(tp);
 
        tg3_stop(tp);
@@ -17561,6 +17590,7 @@ static int tg3_init_one(struct pci_dev *pdev,
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;
+       tp->pcierr_recovery = false;
 
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
@@ -18071,6 +18101,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
+       tp->pcierr_recovery = true;
+
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;
@@ -18195,6 +18227,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
        tg3_phy_start(tp);
 
 done:
+       tp->pcierr_recovery = false;
        rtnl_unlock();
 }
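
Per the comments added above, the tg3 HW/FW mishandles TSO and checksum offload for VLAN-encapsulated frames, so such skbs are diverted to tg3_tso_bug() or skb_checksum_help(). A hedged sketch of the protocol test only; the helper name is made up, and the fallback calls are the ones shown in the hunks above.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static inline bool tx_skb_is_vlan_encapsulated(const struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_8021Q) ||
               skb->protocol == htons(ETH_P_8021AD);
}
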
 
index 461accaf0aa40242c3756880dd6659371cdfe5f0..31c9f829595384cb843e299c6b51054101d8f6c6 100644 (file)
@@ -3407,6 +3407,7 @@ struct tg3 {
 
        struct device                   *hwmon_dev;
        bool                            link_up;
+       bool                            pcierr_recovery;
 };
 
 /* Accessor macros for chip and asic attributes
index ff8cae5e2535b6e068159180105f84b2ff922b1d..ffc92a41d75be550d27698af6ca3e600d9a146fe 100644 (file)
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
         * For TSO, the TCP checksum field is seeded with pseudo-header sum
         * excluding the length field.
         */
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
 
                /* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
                }
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       __be16 net_proto = vlan_get_protocol(skb);
                        u8 proto = 0;
 
-                       if (skb->protocol == htons(ETH_P_IP))
+                       if (net_proto == htons(ETH_P_IP))
                                proto = ip_hdr(skb)->protocol;
 #ifdef NETIF_F_IPV6_CSUM
-                       else if (skb->protocol == htons(ETH_P_IPV6)) {
+                       else if (net_proto == htons(ETH_P_IPV6)) {
                                /* nexthdr may not be TCP immediately. */
                                proto = ipv6_hdr(skb)->nexthdr;
                        }
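
This bnad change, like the ehea, e1000, e1000e, i40e/i40evf and mvneta hunks further down, replaces direct skb->protocol checks with vlan_get_protocol() from <linux/if_vlan.h>, which returns the encapsulated EtherType when the frame carries a VLAN tag, so the L3-specific offload paths still trigger for tagged traffic. A minimal usage sketch; the wrapper name is made up.

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static inline bool tx_payload_is_ipv4(struct sk_buff *skb)
{
        return vlan_get_protocol(skb) == htons(ETH_P_IP);
}
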
index ca5d7798b2651888e651c454bb0ae94f964ca438..e1e02fba4fcc55e663dfc31777f281f26463896c 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
 
 #include "macb.h"
 
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
        struct phy_device *phydev;
        u32 config;
        int err = -ENXIO;
-       struct pinctrl *pinctrl;
        const char *mac;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               err = PTR_ERR(pinctrl);
-               if (err == -EPROBE_DEFER)
-                       goto err_out;
-
-               dev_warn(&pdev->dev, "No pinctrl provided\n");
-       }
-
        err = -ENOMEM;
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev)
index 184a063bed5fa59bbdc705e29355134a2d31d731..07d2201530d26c85e26cf0987553451acad936a6 100644 (file)
@@ -1,6 +1,7 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
        depends on HAS_IOMEM && HAS_DMA
+       depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
          This is the driver for the XGMAC Ethernet IP block found on Calxeda
index 18fb9c61d7bacfd19319a664d2287786938ecec7..e5be511a3c38b13d97af70947341d031ce62acce 100644 (file)
@@ -1253,7 +1253,9 @@ freeout:  t4_free_sge_resources(adap);
                        goto freeout;
        }
 
-       t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+       t4_write_reg(adap, is_t4(adap->params.chip) ?
+                               MPS_TRC_RSS_CONTROL :
+                               MPS_T5_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
-               0xf000, 0x11190,
+               0xf000, 0x11110,
+               0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
-               0x1109c, 0x1117c,
+               0x1109c, 0x11110,
+               0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
-               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
 
                params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
                params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
-               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+                                     val);
                if (ret < 0) {
                        adap->params.max_ordird_qp = 8;
                        adap->params.max_ird_adapter = 32 * adap->tids.ntids;
@@ -6472,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;
+       void __iomem *regs;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -6488,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_release_regions;
        }
 
+       regs = pci_ioremap_bar(pdev, 0);
+       if (!regs) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -ENOMEM;
+               goto out_disable_device;
+       }
+
+       /* We control everything through one PF */
+       func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+       if (func != ent->driver_data) {
+               iounmap(regs);
+               pci_disable_device(pdev);
+               pci_save_state(pdev);        /* to restore SR-IOV later */
+               goto sriov;
+       }
+
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        }
 
@@ -6512,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
-               goto out_disable_device;
+               goto out_unmap_bar0;
        }
 
        adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6524,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
-       adapter->regs = pci_ioremap_bar(pdev, 0);
-       if (!adapter->regs) {
-               dev_err(&pdev->dev, "cannot map device registers\n");
-               err = -ENOMEM;
-               goto out_free_adapter;
-       }
-
-       /* We control everything through one PF */
-       func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
-       if (func != ent->driver_data) {
-               pci_save_state(pdev);        /* to restore SR-IOV later */
-               goto sriov;
-       }
-
+       adapter->regs = regs;
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
@@ -6554,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = t4_prep_adapter(adapter);
        if (err)
-               goto out_unmap_bar0;
+               goto out_free_adapter;
+
 
        if (!is_t4(adapter->params.chip)) {
                s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6571,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
                pci_resource_len(pdev, 2));
                if (!adapter->bar2) {
                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
                        err = -ENOMEM;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
        }
 
@@ -6716,13 +6727,13 @@ sriov:
  out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
- out_unmap_bar0:
-       iounmap(adapter->regs);
  out_free_adapter:
        if (adapter->workq)
                destroy_workqueue(adapter->workq);
 
        kfree(adapter);
+ out_unmap_bar0:
+       iounmap(regs);
  out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
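
The init_one() rework above maps BAR0 and checks PL_WHOAMI before any allocations, so the error labels are reshuffled to keep the unwind in reverse order of acquisition: a failure after the adapter allocation frees the adapter first and only then unmaps the registers. A generic standalone sketch of that label ordering, with made-up resources standing in for the ioremap/kzalloc/workqueue steps.

#include <stdio.h>
#include <stdlib.h>

static int probe_example(void)
{
        void *regs, *adapter, *workq;
        int err;

        regs = malloc(16);              /* acquired first  (ioremap)   */
        if (!regs)
                return -1;

        adapter = malloc(32);           /* acquired second (kzalloc)   */
        if (!adapter) {
                err = -1;
                goto out_unmap;
        }

        workq = malloc(8);              /* acquired third  (workqueue) */
        if (!workq) {
                err = -1;
                goto out_free_adapter;
        }

        /* success path releases everything it owns */
        free(workq);
        free(adapter);
        free(regs);
        return 0;

out_free_adapter:                       /* unwind in reverse order */
        free(adapter);
out_unmap:
        free(regs);
        return err;
}

int main(void)
{
        printf("probe_example() = %d\n", probe_example());
        return 0;
}
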
index a853133d8db826029ac042006e89c6d99348f2d7..41d04462b72eb158f7699c38000e6331291d2447 100644 (file)
@@ -167,6 +167,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
        t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
 }
 
+/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+       static const char *const reason[] = {
+               "Crash",                        /* PCIE_FW_EVAL_CRASH */
+               "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
+               "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
+               "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+               "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+               "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
+               "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+               "Reserved",                     /* reserved */
+       };
+       u32 pcie_fw;
+
+       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+       if (pcie_fw & FW_PCIE_FW_ERR)
+               dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+                       reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        dump_mbox(adap, mbox, data_reg);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
+       t4_report_fw_error(adap);
        return -ETIMEDOUT;
 }
 
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
 
 /**
  *     t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
        if (ret < 0)
                goto out;
-       addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+       /* The VPD shall have a unique identifier specified by the PCI SIG.
+        * For Chelsio adapters, the identifier is 0x82. The first byte of a VPD
+        * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
+        * is expected to automatically put this entry at the
+        * beginning of the VPD.
+        */
+       addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
 
        ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
        if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
+       i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->pn, vpd + pn, min(i, PN_LEN));
        strim(p->pn);
 
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
 
        int fat;
 
-       fat = t4_handle_intr_status(adapter,
-                                   PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                                   sysbus_intr_info) +
-             t4_handle_intr_status(adapter,
-                                   PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                                   pcie_port_intr_info) +
-             t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
-                                   is_t4(adapter->params.chip) ?
-                                   pcie_intr_info : t5_pcie_intr_info);
+       if (is_t4(adapter->params.chip))
+               fat = t4_handle_intr_status(adapter,
+                                           PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                           sysbus_intr_info) +
+                       t4_handle_intr_status(adapter,
+                                             PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                             pcie_port_intr_info) +
+                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                             pcie_intr_info);
+       else
+               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                           t5_pcie_intr_info);
 
        if (fat)
                t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
 
        int fat;
 
+       if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+               t4_report_fw_error(adapter);
+
        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
                                    cim_intr_info) +
              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
 {
        u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
 
-       if (status & MEM_PERR_INT_CAUSE)
+       if (status & MEM_PERR_INT_CAUSE) {
                dev_alert(adap->pdev_dev,
                          "MA parity error, parity status %#x\n",
                          t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+               if (is_t5(adap->params.chip))
+                       dev_alert(adap->pdev_dev,
+                                 "MA parity error, parity status %#x\n",
+                                 t4_read_reg(adap,
+                                             MA_PARITY_ERROR_STATUS2));
+       }
        if (status & MEM_WRAP_INT_CAUSE) {
                v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
                dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
        /*
         * Issue the HELLO command to the firmware.  If it's not successful
         * but indicates that we got a "busy" or "timeout" condition, retry
-        * the HELLO until we exhaust our retry limit.
+        * the HELLO until we exhaust our retry limit.  If we do exceed our
+        * retry limit, check to see if the firmware left us any error
+        * information and report that if so.
         */
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret < 0) {
                if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
                        goto retry;
+               if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+                       t4_report_fw_error(adap);
                return ret;
        }
 
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
+                       lc->supported = be16_to_cpu(p->u.info.pcap);
                        t4_os_link_changed(adap, port, link_ok);
                }
                if (mod != pi->mod_type) {
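
t4_report_fw_error() above decodes the firmware status register: if the error bit is set, a three-bit reason field selects a human-readable string. A standalone sketch of that decode; the register value and the error-bit position are invented, while the EVAL mask/shift mirror the FW_PCIE_FW_EVAL_* definitions added in t4fw_api.h below.

#include <stdio.h>

#define FW_PCIE_FW_ERR         (1u << 31)  /* assumed error bit, per usage above */
#define FW_PCIE_FW_EVAL_MASK   0x7
#define FW_PCIE_FW_EVAL_SHIFT  24
#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
                                FW_PCIE_FW_EVAL_MASK)

static const char *const reason[] = {
        "Crash", "During Device Preparation", "During Device Configuration",
        "During Device Initialization", "Unexpected Event",
        "Insufficient Airflow", "Device Shutdown", "Reserved",
};

int main(void)
{
        unsigned int pcie_fw = (1u << 31) | (4u << 24); /* made-up register value */

        if (pcie_fw & FW_PCIE_FW_ERR)
                printf("Firmware reports adapter error: %s\n",
                       reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
        return 0;
}
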
index e3146e83df2043ae59436e7eff6b8c276a3fb3e4..39fb325474f7e7499534d142d47cdcd0b7fbc1f4 100644 (file)
 #define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
 #define MA_PCIE_FW 0x30b8
 #define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_PARITY_ERROR_STATUS2 0x7804
 
 #define MA_EXT_MEMORY1_BAR 0x7808
 #define EDC_0_BASE_ADDR 0x7900
 #define  TRCMULTIFILTER     0x00000001U
 
 #define MPS_TRC_RSS_CONTROL 0x9808
+#define MPS_T5_TRC_RSS_CONTROL 0xa00c
 #define  RSSCONTROL_MASK    0x00ff0000U
 #define  RSSCONTROL_SHIFT   16
 #define  RSSCONTROL(x)      ((x) << RSSCONTROL_SHIFT)
index 5f2729ebadbe14c4d6c1e56fcc29d078380db437..3409756a85b95586f7640033dc06bb274c4db139 100644 (file)
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
 #define FW_PCIE_FW_MASTER(x)     ((x) << FW_PCIE_FW_MASTER_SHIFT)
 #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
                                 FW_PCIE_FW_MASTER_MASK)
+#define FW_PCIE_FW_EVAL_MASK   0x7
+#define FW_PCIE_FW_EVAL_SHIFT  24
+#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
+                                FW_PCIE_FW_EVAL_MASK)
 
 struct fw_hdr {
        u8 ver;
index 9b33057a94779f239b2515269444dd8c4617a018..70089c29d307a43643222959202228403c31e97f 100644 (file)
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
        const void *mac_addr;
 
        if (!IS_ENABLED(CONFIG_OF) || !np)
-               return NULL;
+               return ERR_PTR(-ENXIO);
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
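
The dm9000 change above returns ERR_PTR(-ENXIO) instead of NULL when there is no usable DT data, letting the caller distinguish "no platform data" from a real failure via IS_ERR()/PTR_ERR(). A minimal sketch of that convention; both helpers below are made up for illustration.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

static void *example_parse(bool have_dt_node)
{
        if (!have_dt_node)
                return ERR_PTR(-ENXIO);         /* "no DT data" is an error */
        return (void *)0x1000;                  /* placeholder for real pdata */
}

static inline int example_probe(bool have_dt_node)
{
        void *pdata = example_parse(have_dt_node);

        if (IS_ERR(pdata))
                return PTR_ERR(pdata);          /* propagates -ENXIO */
        return 0;
}
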
index a0b418e007a0d09303a32838d8acf1a06ca9d061..566b17db135a306a2bb8eea0ce167a70e6e165f4 100644 (file)
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
        swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
 
-       if (skb->protocol != htons(ETH_P_IP))
+       if (vlan_get_protocol(skb) != htons(ETH_P_IP))
                return;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
index cbc330b301cdcfb8dd7e62695700a4e4ee2920e6..ad3d5d12173faea9992f9e46f74f57e21043b05d 100644 (file)
@@ -2674,7 +2674,8 @@ set_itr_now:
 #define E1000_TX_FLAGS_VLAN_SHIFT      16
 
 static int e1000_tso(struct e1000_adapter *adapter,
-                    struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+                    struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+                    __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
                                                                 0);
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb_transport_offset(skb) - 1;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (skb_is_gso_v6(skb)) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter,
-                         struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+                         struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+                         __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;
 
-       switch (skb->protocol) {
+       switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        int count = 0;
        int tso;
        unsigned int f;
+       __be16 protocol = vlan_get_protocol(skb);
 
        /* This goes back to the question of how to logically map a Tx queue
         * to a flow.  Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        first = tx_ring->next_to_use;
 
-       tso = e1000_tso(adapter, tx_ring, skb);
+       tso = e1000_tso(adapter, tx_ring, skb, protocol);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                if (likely(hw->mac_type != e1000_82544))
                        tx_ring->last_tx_tso = true;
                tx_flags |= E1000_TX_FLAGS_TSO;
-       } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+       } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
-       if (likely(skb->protocol == htons(ETH_P_IP)))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
        if (unlikely(skb->no_fcs))
index 65c3aef2bd36a2ee01bab5a11649caa589b83c95..247335d2c7ec26cb9c50bb93c6a675b4ff35879a 100644 (file)
@@ -5164,7 +5164,8 @@ link_up:
 #define E1000_TX_FLAGS_VLAN_MASK       0xffff0000
 #define E1000_TX_FLAGS_VLAN_SHIFT      16
 
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+                    __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 
        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        mss = skb_shinfo(skb)->gso_size;
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+                         __be16 protocol)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        unsigned int i;
        u8 css;
        u32 cmd_len = E1000_TXD_CMD_DEXT;
-       __be16 protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;
 
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
-
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        int count = 0;
        int tso;
        unsigned int f;
+       __be16 protocol = vlan_get_protocol(skb);
 
        if (test_bit(__E1000_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        first = tx_ring->next_to_use;
 
-       tso = e1000_tso(tx_ring, skb);
+       tso = e1000_tso(tx_ring, skb, protocol);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        if (tso)
                tx_flags |= E1000_TX_FLAGS_TSO;
-       else if (e1000_tx_csum(tx_ring, skb))
+       else if (e1000_tx_csum(tx_ring, skb, protocol))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
        /* The old method assumed an IPv4 packet by default whenever TSO was
         * enabled.  82571 hardware supports TSO for IPv6 as well, so we can
         * no longer make that assumption.
         */
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
        if (unlikely(skb->no_fcs))
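
For a frame that carries its VLAN tag in-line (rather than as an accelerated tag in the skb metadata), skb->protocol reads ETH_P_8021Q, so TSO/checksum decisions keyed on it would miss the encapsulated IPv4/IPv6 header. vlan_get_protocol() from <linux/if_vlan.h> resolves the inner protocol, which is why the hunks above compute it once and pass it down. A minimal sketch of the idea, modelled on the open-coded logic removed from e1000_tx_csum(); l3_protocol_of() is an illustrative name only, and the in-tree helper handles additional cases (e.g. hardware-accelerated tags) more carefully:

        /* Sketch only: needs <linux/if_vlan.h> and <linux/if_ether.h>. */
        static inline __be16 l3_protocol_of(const struct sk_buff *skb)
        {
                /* In-tree callers should use vlan_get_protocol() instead. */
                if (skb->protocol == htons(ETH_P_8021Q))
                        return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                return skb->protocol;
        }

Caching the result in one local and handing it to e1000_tso(), e1000_tx_csum() and the IPv4 flag check keeps all three decisions consistent for the same frame.
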
index a51aa37b7b5af10a5204c1ef8329ce9229b1ce57..369848e107f8ed37952b4849ed632655f598b6ba 100644 (file)
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                goto out_drop;
 
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
index 79bf96ca648954b390dbf60728ae687181292097..95a3ec236b4951ab7166637159393df569da97fb 100644 (file)
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                goto out_drop;
 
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
index c9f1d1b7ef378bef042b12c9e00f5bf2786594a8..ade067de168959b30c9e7eafc07ed40df0967ee0 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mbus.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 {
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
+               __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
                        return MVNETA_TX_L4_CSUM_NOT;
 
                return mvneta_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                           l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVNETA_TX_L4_CSUM_NOT;
index 65a4a0f88ea0d80090bc2e604fe0f7e2b0bb2fad..02a2e90d581a450fcf02f19b1924de14d0cb60ea 100644 (file)
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+       int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+                       + 1;
+       int max_port = min_port +
+               bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+       if (port < min_port)
+               port = min_port;
+       else if (port >= max_port)
+               port = max_port - 1;
+
+       return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->mac = mac;
        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
        if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
        struct mlx4_priv *priv;
 
        priv = mlx4_priv(dev);
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        switch (link_state) {
        case IFLA_VF_LINK_STATE_AUTO:
                /* get current link state */
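
As a worked example of the clamping done by mlx4_slaves_closest_port() (illustrative numbers, not taken from the patch): if a slave's active-port bitmap covers ports 2 and 3 of a four-port device, find_first_bit() returns 1, so min_port = 2 and max_port = 2 + 2 = 4; a requested port of 1 is raised to 2 and a requested port of 4 is pulled back to max_port - 1 = 3. Each of the ndo_set_vf_* paths above therefore always indexes a vport the slave actually owns.
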
index e22f24f784fcead5e40102951ca6d00cbcfba2f5..35ff2925110a3f690098e62ab73a3321965be17f 100644 (file)
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
 
+       if (pause->autoneg)
+               return -EINVAL;
+
        priv->prof->tx_pause = pause->tx_pause != 0;
        priv->prof->rx_pause = pause->rx_pause != 0;
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
index bb536aa613f483434f35c4bde31ae36ab9ced290..abddcf8c40aa120c4d4ade2822ccef68d00ea434 100644 (file)
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
                                    int qpn, u64 *reg_id)
 {
        int err;
-       struct mlx4_spec_list spec_eth_outer = { {NULL} };
-       struct mlx4_spec_list spec_vxlan     = { {NULL} };
-       struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-       struct mlx4_net_trans_rule rule = {
-               .queue_mode = MLX4_NET_TRANS_Q_FIFO,
-               .exclusive = 0,
-               .allow_loopback = 1,
-               .promisc_mode = MLX4_FS_REGULAR,
-               .priority = MLX4_DOMAIN_NIC,
-       };
-
-       __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return 0; /* do nothing */
 
-       rule.port = priv->port;
-       rule.qpn = qpn;
-       INIT_LIST_HEAD(&rule.list);
-
-       spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-       memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-       memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-       spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
-       spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
-
-       list_add_tail(&spec_eth_outer.list, &rule.list);
-       list_add_tail(&spec_vxlan.list,     &rule.list);
-       list_add_tail(&spec_eth_inner.list, &rule.list);
-
-       err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+       err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+                                   MLX4_DOMAIN_NIC, reg_id);
        if (err) {
                en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
                return err;
index 7e2d5d57c598269ed20b0328da784db71276814d..871e3a5bda38535c3dbe8c3649e335aaa2b4077c 100644 (file)
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 #endif /* CONFIG_PCI_MSI */
 
 static uint8_t num_vfs[3] = {0, 0, 0};
-static int num_vfs_argc = 3;
+static int num_vfs_argc;
 module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
                          "num_vfs=port1,port2,port1+2");
 
 static uint8_t probe_vf[3] = {0, 0, 0};
-static int probe_vfs_argc = 3;
+static int probe_vfs_argc;
 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
                           "probe_vf=port1,port2,port1+2");
index d80e7a6fac74c4381cea11f946c889216a5ce05d..ca0f98c951054945cd29fc3be2284fc7eeb7d729 100644 (file)
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+                         int port, int qpn, u16 prio, u64 *reg_id)
+{
+       int err;
+       struct mlx4_spec_list spec_eth_outer = { {NULL} };
+       struct mlx4_spec_list spec_vxlan     = { {NULL} };
+       struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+       struct mlx4_net_trans_rule rule = {
+               .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+               .exclusive = 0,
+               .allow_loopback = 1,
+               .promisc_mode = MLX4_FS_REGULAR,
+       };
+
+       __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+       rule.port = port;
+       rule.qpn = qpn;
+       rule.priority = prio;
+       INIT_LIST_HEAD(&rule.list);
+
+       spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+       memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+       memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+       spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
+       spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
+
+       list_add_tail(&spec_eth_outer.list, &rule.list);
+       list_add_tail(&spec_vxlan.list,     &rule.list);
+       list_add_tail(&spec_eth_inner.list, &rule.list);
+
+       err = mlx4_flow_attach(dev, &rule, reg_id);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
                                      u32 max_range_qpn)
 {
index 7d717eccb7b042284cc861fa8da2af0d63addb11..193a6adb5d04d887e325f59793423c91414c1fcb 100644 (file)
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
        struct mlx4_cmd_mailbox *mailbox = NULL;
 
-       /* Make sure that at this point we have single-threaded access only */
-
        if (mmr->enabled != MLX4_MPT_EN_HW)
                return -EINVAL;
 
        err = mlx4_HW2SW_MPT(dev, NULL, key);
-
        if (err) {
                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                                   0, MLX4_CMD_QUERY_MPT,
                                   MLX4_CMD_TIME_CLASS_B,
                                   MLX4_CMD_WRAPPED);
-
                if (err)
                        goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }
 
-       mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-       if (!err)
+       if (!err) {
+               mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
                mmr->enabled = MLX4_MPT_EN_HW;
+       }
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
                         u32 pdn)
 {
-       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
        /* The wrapper function will put the slave's id here */
        if (mlx4_is_mfunc(dev))
                pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-       mpt_entry->pd_flags = cpu_to_be32((pd_flags &  ~MLX4_MPT_PD_MASK) |
+
+       mpt_entry->pd_flags = cpu_to_be32(pd_flags |
                                          (pdn & MLX4_MPT_PD_MASK)
                                          | MLX4_MPT_PD_FLAG_EN_INV);
        return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(mr->iova);
-       mpt_entry->length      = cpu_to_be64(mr->size);
-       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+       mpt_entry->start       = cpu_to_be64(iova);
+       mpt_entry->length      = cpu_to_be64(size);
+       mpt_entry->entity_size = cpu_to_be32(page_shift);
 
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
+       mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+                                          MLX4_MPT_PD_FLAG_EN_INV);
+       mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+                                          MLX4_MPT_FLAG_SW_OWNS);
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
                if (mr->mtt.page_shift == 0)
                        mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        }
+       if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+               /* fast register MR in free state */
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+               mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+                                                  MLX4_MPT_PD_FLAG_RAE);
+       } else {
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+       }
        mr->enabled = MLX4_MPT_EN_SW;
 
        return 0;
index 9ba0c1ca10d59ffe1a2be56b67439f0013b8ad59..94eeb2c7d7e439725be63bf48cd061d361816a9c 100644 (file)
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
        int i;
 
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if ((mac & MLX4_MAC_MASK) ==
+               if (table->refs[i] &&
+                   (MLX4_MAC_MASK & mac) ==
                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
                        return i;
        }
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
        mutex_lock(&table->mutex);
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (free < 0 && !table->entries[i]) {
-                       free = i;
+               if (!table->refs[i]) {
+                       if (free < 0)
+                               free = i;
                        continue;
                }
 
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+               if ((MLX4_MAC_MASK & mac) ==
+                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
                        /* MAC already registered, increment ref count */
                        err = i;
                        ++table->refs[i];
index 0dc31d85fc3b8ee4e56eb7c2738239fb7fa86f76..2301365c79c710b01d07d4a8be7353f2a371e03a 100644 (file)
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
 {
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_update_qp_context *cmd;
        u64 pri_addr_path_mask = 0;
+       u64 qp_mask = 0;
        int err = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
                cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
        }
 
+       if (attr & MLX4_UPDATE_QP_VSD) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+               if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+                       cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+       }
+
        cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+       cmd->qp_mask = cpu_to_be64(qp_mask);
 
-       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+       err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
 
index 1089367fed22632e087b2da0bcd150cd1dce6f48..5d2498dcf536d5e96cc13b9274b00137d4508c5d 100644 (file)
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
+       u32 qp_type;
        int port;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+       qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;
 
-               /* force strip vlan by clear vsd */
-               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+               /* Force VLAN stripping by clearing VSD; here an MLX QP means a Raw Ethernet QP */
+               if (qp_type == MLX4_QP_ST_UD ||
+                   (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+                       if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+                               *(__be32 *)inbox->buf =
+                                       cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+                                       MLX4_QP_OPTPAR_VLAN_STRIPPING);
+                               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+                       } else {
+                               struct mlx4_update_qp_params params = {.flags = 0};
+
+                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                       }
+               }
 
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
        }
 
        port = (rqp->sched_queue >> 6 & 1) + 1;
-       smac_index = cmd->qp_context.pri_path.grh_mylmc;
-       err = mac_find_smac_ix_in_slave(dev, slave, port,
-                                       smac_index, &mac);
-       if (err) {
-               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-                        qpn, smac_index);
-               goto err_mac;
+
+       if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+               smac_index = cmd->qp_context.pri_path.grh_mylmc;
+               err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                               smac_index, &mac);
+
+               if (err) {
+                       mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                                qpn, smac_index);
+                       goto err_mac;
+               }
        }
 
        err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
        upd_context = mailbox->buf;
-       upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+       upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
index 5020fd47825d65c359f010a9bcbf0f2e9a0ceaf7..2f12c88c66abfa1cd0bb0e099e4a33a761327fd6 100644 (file)
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
        int rx_head = priv->rx_head;
        int rx = 0;
 
-       while (1) {
+       while (rx < budget) {
                desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
                desc0 = readl(desc + RX_REG_OFFSET_DESC0);
 
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                        net_dbg_ratelimited("packet error\n");
                        priv->stats.rx_dropped++;
                        priv->stats.rx_errors++;
-                       continue;
+                       goto rx_next;
                }
 
                len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (len > RX_BUF_SIZE)
                        len = RX_BUF_SIZE;
 
-               skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+               dma_sync_single_for_cpu(&ndev->dev,
+                                       priv->rx_mapping[rx_head],
+                                       priv->rx_buf_size, DMA_FROM_DEVICE);
+               skb = netdev_alloc_skb_ip_align(ndev, len);
+
                if (unlikely(!skb)) {
-                       net_dbg_ratelimited("build_skb failed\n");
+                       net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
                        priv->stats.rx_dropped++;
                        priv->stats.rx_errors++;
+                       goto rx_next;
                }
 
+               memcpy(skb->data, priv->rx_buf[rx_head], len);
                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, ndev);
                napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (desc0 & RX_DESC0_MULTICAST)
                        priv->stats.multicast++;
 
+rx_next:
                writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
                rx_head = RX_NEXT(rx_head);
                priv->rx_head = rx_head;
-
-               if (rx >= budget)
-                       break;
        }
 
        if (rx < budget) {
-               napi_gro_flush(napi, false);
-               __napi_complete(napi);
+               napi_complete(napi);
        }
 
        priv->reg_imr |= RPKT_FINISH_M;
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                len = ETH_ZLEN;
        }
 
-       txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
-       txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
-       txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
-       txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+       dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+                                  priv->tx_buf_size, DMA_TO_DEVICE);
+
+       txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+       if (tx_head == TX_DESC_NUM_MASK)
+               txdes1 |= TX_DESC1_END;
        writel(txdes1, desc + TX_REG_OFFSET_DESC1);
        writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
        spin_lock_init(&priv->txlock);
 
        priv->tx_buf_size = TX_BUF_SIZE;
-       priv->rx_buf_size = RX_BUF_SIZE +
-                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       priv->rx_buf_size = RX_BUF_SIZE;
 
        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
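
The receive-path rework above follows the usual NAPI contract: consume at most budget frames per poll, leave the rest for the next round, and only call napi_complete() and re-enable the interrupt once the ring drains within budget. A minimal sketch of that shape; the example_* functions are placeholders standing in for driver-specific work, not symbols from this driver:

        /* Sketch only: assumes <linux/netdevice.h>; placeholders declared below. */
        bool example_rx_frame_ready(void);
        void example_receive_one_frame(void);
        void example_unmask_rx_interrupt(void);

        static int example_rx_poll(struct napi_struct *napi, int budget)
        {
                int rx = 0;

                while (rx < budget) {
                        if (!example_rx_frame_ready())  /* no completed descriptor left */
                                break;
                        example_receive_one_frame();    /* sync DMA, build skb, hand to GRO */
                        rx++;
                }

                if (rx < budget) {
                        napi_complete(napi);            /* all pending work done */
                        example_unmask_rx_interrupt();
                }

                return rx;                              /* frames consumed this poll */
        }
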
index 8706c0dbd0c36a2c3b7af167eaa4c29cda3905a5..a44a03c45014903a6410ebe2d665f3651ee30db2 100644 (file)
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
 
        __lpc_eth_clock_enable(pldat, true);
 
+       /* A suspended PHY makes the LPC Ethernet core block, so resume it now */
+       phy_resume(pldat->phy_dev);
+
        /* Reset and initialize */
        __lpc_eth_reset(pldat);
        __lpc_eth_init(pldat);
index 979c6980639f59ea8fcd46f0b7588cd9b3c2b239..a42293092ea4f8fe2b7fe96444bfaafada3c046a 100644 (file)
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
+                       u64 ns;
+
                        memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
-                       u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+                       ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
index 44c8be1c68051ec9a9b79f4ba481060022613e79..5f7a3521279639308914dcb4cb35e488e883dbd9 100644 (file)
@@ -7,6 +7,7 @@ config PCH_GBE
        depends on PCI && (X86_32 || COMPILE_TEST)
        select MII
        select PTP_1588_CLOCK_PCH
+       select NET_PTP_CLASSIFY
        ---help---
          This is a gigabit ethernet driver for EG20T PCH.
          EG20T PCH is the platform controller hub that is used in Intel's
index 32058614151ae8c498887da42278521e4f8bed3b..5c4068353f664e8924f832c3d681ec41f53465c6 100644 (file)
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
        int i, j;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
 
+       spin_lock(&adapter->tx_clean_lock);
        cmd_buf = tx_ring->cmd_buf_arr;
        for (i = 0; i < tx_ring->num_desc; i++) {
                buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
                }
                cmd_buf++;
        }
+       spin_unlock(&adapter->tx_clean_lock);
 }
 
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
                        break;
        }
 
-       if (count && netif_running(netdev)) {
-               tx_ring->sw_consumer = sw_consumer;
+       tx_ring->sw_consumer = sw_consumer;
 
+       if (count && netif_running(netdev)) {
                smp_mb();
 
                if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
index 1159031f885b10bde26d5119e0aeaedd5641ee0c..5ec5a2b0e9895e02a0dd778d55c79a9d210685ef 100644 (file)
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
                return;
 
        smp_mb();
-       spin_lock(&adapter->tx_clean_lock);
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
 
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
        netxen_napi_disable(adapter);
 
        netxen_release_tx_buffers(adapter);
-       spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
index 86783e1afcf76d77176a079e4420a50f7b452b62..3172cdf591fe1622c263d00f36ee12216205c57a 100644 (file)
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
 {
        u32 idc_params, val;
 
-       if (qlcnic_83xx_lockless_flash_read32(adapter,
-                                             QLC_83XX_IDC_FLASH_PARAM_ADDR,
-                                             (u8 *)&idc_params, 1)) {
+       if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+                                    (u8 *)&idc_params, 1)) {
                dev_info(&adapter->pdev->dev,
                         "%s:failed to get IDC params from flash\n", __func__);
                adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
index 141f116eb868bfe5569e5693a73c6266ef41ff90..494e8105adee7834341353ddc81cf65a70a33b9c 100644 (file)
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_esw_statistics port_stats;
        struct qlcnic_mac_statistics mac_stats;
-       int index, ret, length, size, tx_size, ring;
+       int index, ret, length, size, ring;
        char *p;
 
-       tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+       memset(data, 0, stats->n_stats * sizeof(u64));
 
-       memset(data, 0, tx_size * sizeof(u64));
        for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
                        tx_ring = &adapter->tx_ring[ring];
                        data = qlcnic_fill_tx_queue_stats(data, tx_ring);
                        qlcnic_update_stats(adapter);
+               } else {
+                       data += QLCNIC_TX_STATS_LEN;
                }
        }
 
-       memset(data, 0, stats->n_stats * sizeof(u64));
        length = QLCNIC_STATS_LEN;
        for (index = 0; index < length; index++) {
                p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
index 188626e2a861d317510617dce742595b88bd3abd..3e96f269150d197253fdcdf3066c1a2da5b62985 100644 (file)
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 
        if (skb_is_gso(skb)) {
                int err;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
                err = skb_cow_head(skb, 0);
                if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
                                << OB_MAC_TRANSPORT_HDR_SHIFT);
                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
-               if (likely(skb->protocol == htons(ETH_P_IP))) {
+               if (likely(l3_proto == htons(ETH_P_IP))) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->check = 0;
                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
index 91652e7235e469be8c20a7ecd7a5b926178ab1af..0921302553c6f09f871e8932a0c19e804de61189 100644 (file)
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
                                   netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       netdev_features_t changed = features ^ dev->features;
        void __iomem *ioaddr = tp->mmio_addr;
+       u32 rx_config;
 
-       if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-                        NETIF_F_HW_VLAN_CTAG_RX)))
-               return;
+       rx_config = RTL_R32(RxConfig);
+       if (features & NETIF_F_RXALL)
+               rx_config |= (AcceptErr | AcceptRunt);
+       else
+               rx_config &= ~(AcceptErr | AcceptRunt);
 
-       if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-               if (features & NETIF_F_RXCSUM)
-                       tp->cp_cmd |= RxChkSum;
-               else
-                       tp->cp_cmd &= ~RxChkSum;
+       RTL_W32(RxConfig, rx_config);
 
-               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-                       tp->cp_cmd |= RxVlan;
-               else
-                       tp->cp_cmd &= ~RxVlan;
+       if (features & NETIF_F_RXCSUM)
+               tp->cp_cmd |= RxChkSum;
+       else
+               tp->cp_cmd &= ~RxChkSum;
 
-               RTL_W16(CPlusCmd, tp->cp_cmd);
-               RTL_R16(CPlusCmd);
-       }
-       if (changed & NETIF_F_RXALL) {
-               int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-               if (features & NETIF_F_RXALL)
-                       tmp |= (AcceptErr | AcceptRunt);
-               RTL_W32(RxConfig, tmp);
-       }
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               tp->cp_cmd |= RxVlan;
+       else
+               tp->cp_cmd &= ~RxVlan;
+
+       tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+       RTL_W16(CPlusCmd, tp->cp_cmd);
+       RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
+       features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
        rtl_lock_work(tp);
-       __rtl8169_set_features(dev, features);
+       if (features ^ dev->features)
+               __rtl8169_set_features(dev, features);
        rtl_unlock_work(tp);
 
        return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
        }
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
        const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_mwi_2;
        }
 
-       tp->cp_cmd = RxChkSum;
+       tp->cp_cmd = 0;
 
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       /*
-        * Pretend we are using VLANs; This bypasses a nasty bug where
-        * Interrupts stop flowing on high load on 8110SCd controllers.
-        */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               tp->cp_cmd |= RxVlan;
-
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
        rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
 
+       tp->cp_cmd |= RxChkSum | RxVlan;
+
+       /*
+        * Pretend we are using VLANs; this bypasses a nasty bug where
+        * interrupts stop flowing on high load on 8110SCd controllers.
+        */
        if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+               /* Disallow toggling */
                dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
        if (tp->txd_version == RTL_TD_0)
index 9e757c792d846f300288c9848225e1db9a9584fa..196e98a2d93bbfa905a0b13d8a372da00add0f7b 100644 (file)
@@ -5,6 +5,7 @@
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
        depends on HAS_DMA
+       depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
        select CRC32
        select MII
        select MDIO_BITBANG
index 0537381cd2f6a443a6b0188f4e3218c90242c03b..6859437b59fbb688e7e9188b96bdf5a53811a767 100644 (file)
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
        u32 crc;
        int bit;
 
+       if (!efx_dev_registered(efx))
+               return;
+
        netif_addr_lock_bh(net_dev);
 
        efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
index c553f6b5a9131f0af16230f59ccd0557fe1116a5..cf28daba4346f0c288d1718513554b38e4de2176 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
        desc->des2 = dma_map_single(priv->device, skb->data,
                                    bmax, DMA_TO_DEVICE);
-       priv->tx_skbuff_dma[entry] = desc->des2;
+       if (dma_mapping_error(priv->device, desc->des2))
+               return -1;
+       priv->tx_skbuff_dma[entry].buf = desc->des2;
        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
        while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = dma_map_single(priv->device,
                                                    (skb->data + bmax * i),
                                                    bmax, DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = desc->des2;
+                       if (dma_mapping_error(priv->device, desc->des2))
+                               return -1;
+                       priv->tx_skbuff_dma[entry].buf = desc->des2;
                        priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
                                                        STMMAC_CHAIN_MODE);
                        priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = dma_map_single(priv->device,
                                                    (skb->data + bmax * i), len,
                                                    DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = desc->des2;
+                       if (dma_mapping_error(priv->device, desc->des2))
+                               return -1;
+                       priv->tx_skbuff_dma[entry].buf = desc->des2;
                        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                        STMMAC_CHAIN_MODE);
                        priv->hw->desc->set_tx_owner(desc);
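
The checks added above apply the standard DMA-API rule that every address returned by dma_map_single() (or skb_frag_dma_map()) must be validated with dma_mapping_error() before it is handed to the hardware. A minimal sketch of the idiom, assuming the caller drops the skb and bumps tx_dropped on failure as stmmac_xmit() does further down; example_map_tx_buf() is an illustrative name:

        /* Sketch only: relies on <linux/dma-mapping.h> and <linux/skbuff.h>. */
        static int example_map_tx_buf(struct device *dev, struct sk_buff *skb,
                                      unsigned int len, dma_addr_t *buf)
        {
                *buf = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, *buf))
                        return -1;      /* caller frees the skb and counts the drop */
                return 0;
        }
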
index de507c32036c75331e393ad5b013890ffffb9d1b..593e6c4144a7c197140f875f73e86c5583c215d1 100644 (file)
@@ -220,10 +220,10 @@ enum dma_irq_status {
        handle_tx = 0x8,
 };
 
-#define        CORE_IRQ_TX_PATH_IN_LPI_MODE    (1 << 1)
-#define        CORE_IRQ_TX_PATH_EXIT_LPI_MODE  (1 << 2)
-#define        CORE_IRQ_RX_PATH_IN_LPI_MODE    (1 << 3)
-#define        CORE_IRQ_RX_PATH_EXIT_LPI_MODE  (1 << 4)
+#define        CORE_IRQ_TX_PATH_IN_LPI_MODE    (1 << 0)
+#define        CORE_IRQ_TX_PATH_EXIT_LPI_MODE  (1 << 1)
+#define        CORE_IRQ_RX_PATH_IN_LPI_MODE    (1 << 2)
+#define        CORE_IRQ_RX_PATH_EXIT_LPI_MODE  (1 << 3)
 
 #define        CORE_PCS_ANE_COMPLETE           (1 << 5)
 #define        CORE_PCS_LINK_STATUS            (1 << 6)
@@ -287,7 +287,7 @@ struct dma_features {
 
 /* Default LPI timers */
 #define STMMAC_DEFAULT_LIT_LS  0x3E8
-#define STMMAC_DEFAULT_TWT_LS  0x0
+#define STMMAC_DEFAULT_TWT_LS  0x1E
 
 #define STMMAC_CHAIN_MODE      0x1
 #define STMMAC_RING_MODE       0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
        void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
                      unsigned int extend_desc);
        unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-       unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+       int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
        int (*set_16kib_bfsize)(int mtu);
        void (*init_desc3)(struct dma_desc *p);
        void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
        int multicast_filter_bins;
        int unicast_filter_entries;
        int mcast_bits_log2;
+       unsigned int rx_csum;
 };
 
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
index 71b5419256c138e335c77aab87cf37784ff79037..64d8f56a9c1732f9199cc3a748e25ae8c1107ed7 100644 (file)
@@ -153,7 +153,7 @@ enum inter_frame_gap {
 #define GMAC_CONTROL_RE                0x00000004      /* Receiver Enable */
 
 #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
-                       GMAC_CONTROL_BE)
+                       GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
 
 /* GMAC Frame Filter defines */
 #define GMAC_FRAME_FILTER_PR   0x00000001      /* Promiscuous Mode */
index d8ef18786a1cadae60f5d550269a0036a3f24661..5efe60ea6526b3d06305530b75240b9de719f1f3 100644 (file)
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
 
-       value |= GMAC_CONTROL_IPC;
+       if (hw->rx_csum)
+               value |= GMAC_CONTROL_IPC;
+       else
+               value &= ~GMAC_CONTROL_IPC;
+
        writel(value, ioaddr + GMAC_CONTROL);
 
        value = readl(ioaddr + GMAC_CONTROL);
index 8607488cbcfcfaea7bc70510f0a3f601c5dade7d..192c2491330b1070348f5ccf5c5eb51d492e7b24 100644 (file)
@@ -68,7 +68,7 @@ struct stmmac_counters {
        unsigned int mmc_rx_octetcount_g;
        unsigned int mmc_rx_broadcastframe_g;
        unsigned int mmc_rx_multicastframe_g;
-       unsigned int mmc_rx_crc_errror;
+       unsigned int mmc_rx_crc_error;
        unsigned int mmc_rx_align_error;
        unsigned int mmc_rx_run_error;
        unsigned int mmc_rx_jabber_error;
index 50617c5a0bdb5e63fc930f7af277a1a9fb139af4..08c483bd2ec7bd94d5434f9567c75f609ce27d35 100644 (file)
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
        mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
        mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
        mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
-       mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+       mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
        mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
        mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
        mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
index 650a4be6bce5243e046fcd226719e669f66444e8..5dd50c6cda5be0280e235152057a329a89fb187c 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            bmax, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
                                                STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                            len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
        } else {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
                                                STMMAC_RING_MODE);
index ca01035634a76fbc88414f6550849cfa1c772403..58097c0e2ad502699ace45392bb3f12855101625 100644 (file)
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_tx_info {
+       dma_addr_t buf;
+       bool map_as_page;
+};
+
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       dma_addr_t *tx_skbuff_dma;
+       struct stmmac_tx_info *tx_skbuff_dma;
        dma_addr_t dma_tx_phy;
        int tx_coalesce;
        int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_ops;
        unsigned int default_addend;
+       struct clk *clk_ptp_ref;
+       unsigned int clk_ptp_rate;
        u32 adv_ts;
        int use_riwt;
        int irq_wake;
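
The map_as_page flag in the new stmmac_tx_info record exists because the DMA API requires symmetric teardown: buffers mapped with dma_map_single() must be released with dma_unmap_single(), while fragments mapped through skb_frag_dma_map() (a page mapping) must be released with dma_unmap_page(). A minimal sketch of the unmap decision the later hunks make; example_unmap_tx_buf() is an illustrative name, and dev/len stand in for the surrounding driver state:

        static void example_unmap_tx_buf(struct device *dev,
                                         struct stmmac_tx_info *info,
                                         unsigned int len)
        {
                if (info->map_as_page)
                        dma_unmap_page(dev, info->buf, len, DMA_TO_DEVICE);
                else
                        dma_unmap_single(dev, info->buf, len, DMA_TO_DEVICE);
                info->buf = 0;
                info->map_as_page = false;
        }
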
index 9af50bae4dde67c543c15e0344f90ac9d53fd2b8..cf4f38db1c0a60338c1d7479b0cfe28f50b6b558 100644 (file)
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
        STMMAC_MMC_STAT(mmc_rx_octetcount_g),
        STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
        STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
-       STMMAC_MMC_STAT(mmc_rx_crc_errror),
+       STMMAC_MMC_STAT(mmc_rx_crc_error),
        STMMAC_MMC_STAT(mmc_rx_align_error),
        STMMAC_MMC_STAT(mmc_rx_run_error),
        STMMAC_MMC_STAT(mmc_rx_jabber_error),
index 08addd65372818f48075b585eeb1d36ab682025b..b0c1521e08a3a30df88871fdfc36b22bbf6ecb4e 100644 (file)
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
+       char *phy_bus_name = priv->plat->phy_bus_name;
        bool ret = false;
 
        /* Using PCS we cannot deal with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
            (priv->pcs == STMMAC_PCS_RTBI))
                goto out;
 
+       /* Never init EEE in case of a switch is attached */
+       /* Never init EEE if a switch is attached */
+               goto out;
+
        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
-               } else
-                       /* Set HW EEE according to the speed */
-                       priv->hw->mac->set_eee_pls(priv->hw,
-                                                  priv->phydev->link);
+               }
+               /* Set HW EEE according to the speed */
+               priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                /* calculate default added value:
                 * formula is :
                 * addend = (2^32)/freq_div_ratio;
-                * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
-                * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
-                * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+                * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+                * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+                * NOTE: clk_ptp_ref_i should be >= 50MHz to
                 *       achieve 20ns accuracy.
                 *
                 * 2^x * y == (y << x), hence
                 * 2^32 * 50000000 ==> (50000000 << 32)
                 */
                temp = (u64) (50000000ULL << 32);
-               priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+               priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ioaddr,
                                             priv->default_addend);
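
To make the addend arithmetic concrete (illustrative numbers, not from the patch): with clk_ptp_rate = 62,500,000 Hz, addend = (50,000,000 << 32) / 62,500,000 = 0.8 * 2^32 = 3,435,973,836 (0xCCCCCCCC), so the fine-correction accumulator advances the timestamp counter at 62.5 MHz * 0.8 = 50 MHz, the 20 ns granularity the comment above assumes.
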
 
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
+       /* Fall-back to main clock in case of no PTP ref is passed */
+       priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+       if (IS_ERR(priv->clk_ptp_ref)) {
+               priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+               priv->clk_ptp_ref = NULL;
+       } else {
+               clk_prepare_enable(priv->clk_ptp_ref);
+               priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+       }
+
        priv->adv_ts = 0;
        if (priv->dma_cap.atime_stamp && priv->extend_desc)
                priv->adv_ts = 1;
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
 
 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
+       if (priv->clk_ptp_ref)
+               clk_disable_unprepare(priv->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
 }
 
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_tx + i;
                p->des2 = 0;
-               priv->tx_skbuff_dma[i] = 0;
+               priv->tx_skbuff_dma[i].buf = 0;
+               priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff[i] = NULL;
        }
 
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
                else
                        p = priv->dma_tx + i;
 
-               if (priv->tx_skbuff_dma[i]) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[i],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[i] = 0;
+               if (priv->tx_skbuff_dma[i].buf) {
+                       if (priv->tx_skbuff_dma[i].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[i].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[i].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
                }
 
                if (priv->tx_skbuff[i] != NULL) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
+                       priv->tx_skbuff_dma[i].buf = 0;
+                       priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
 }
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;
 
-       priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+       priv->tx_skbuff_dma = kmalloc_array(txsize,
+                                           sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                        pr_debug("%s: curr %d, dirty %d\n", __func__,
                                 priv->cur_tx, priv->dirty_tx);
 
-               if (likely(priv->tx_skbuff_dma[entry])) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[entry],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = 0;
+               if (likely(priv->tx_skbuff_dma[entry].buf)) {
+                       if (priv->tx_skbuff_dma[entry].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[entry].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[entry].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
+                       priv->tx_skbuff_dma[entry].buf = 0;
+                       priv->tx_skbuff_dma[entry].map_as_page = false;
                }
                priv->hw->mode->clean_desc3(priv, p);
 
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->hw, dev->mtu);
 
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+               pr_warn(" RX IPC Checksum Offload disabled\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+               priv->hw->rx_csum = 0;
+       }
+
        /* Enable the MAC Rx/Tx */
        stmmac_set_mac(priv->ioaddr, true);
 
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        if (likely(!is_jumbo)) {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
                                                csum_insertion, priv->mode);
        } else {
                desc = first;
                entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+               if (unlikely(entry < 0))
+                       goto dma_map_err;
        }
 
        for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
                desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
                                              DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err; /* should reuse desc w/o issues */
+
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
+               priv->tx_skbuff_dma[entry].map_as_page = true;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
                                                priv->mode);
                wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
        spin_unlock(&priv->tx_lock);
+       return NETDEV_TX_OK;
 
+dma_map_err:
+       dev_err(priv->device, "Tx dma map failed\n");
+       dev_kfree_skb(skb);
+       priv->dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
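The xmit changes follow the usual DMA mapping discipline: every address returned by dma_map_single() or skb_frag_dma_map() is checked with dma_mapping_error() before it is written to a descriptor, fragments are flagged with map_as_page so the completion path above can pair them with dma_unmap_page() (linear data with dma_unmap_single()), and on failure the skb is dropped and NETDEV_TX_OK is returned so the stack does not retry a packet the driver already freed. A hedged sketch of that pattern, with illustrative struct and helper names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Illustrative bookkeeping entry, mirroring the buf/map_as_page idea. */
struct tx_entry {
        dma_addr_t buf;
        bool map_as_page;
};

static int map_linear(struct device *dev, struct tx_entry *e,
                      struct sk_buff *skb, unsigned int len)
{
        e->buf = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, e->buf))
                return -ENOMEM;         /* never hand a bad address to the HW */
        e->map_as_page = false;         /* undo with dma_unmap_single() */
        return 0;
}

static int map_frag(struct device *dev, struct tx_entry *e,
                    const skb_frag_t *frag, unsigned int len)
{
        e->buf = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, e->buf))
                return -ENOMEM;
        e->map_as_page = true;          /* undo with dma_unmap_page() */
        return 0;
}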
 
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                        priv->rx_skbuff_dma[entry] =
                            dma_map_single(priv->device, skb->data, bfsize,
                                           DMA_FROM_DEVICE);
-
+                       if (dma_mapping_error(priv->device,
+                                             priv->rx_skbuff_dma[entry])) {
+                               dev_err(priv->device, "Rx dma map failed\n");
+                               dev_kfree_skb(skb);
+                               break;
+                       }
                        p->des2 = priv->rx_skbuff_dma[entry];
 
                        priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
        unsigned int entry = priv->cur_rx % rxsize;
        unsigned int next_entry;
        unsigned int count = 0;
-       int coe = priv->plat->rx_coe;
+       int coe = priv->hw->rx_csum;
 
        if (netif_msg_rx_status(priv)) {
                pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
 
        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
                features &= ~NETIF_F_RXCSUM;
-       else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
-               features &= ~NETIF_F_IPV6_CSUM;
+
        if (!priv->plat->tx_coe)
                features &= ~NETIF_F_ALL_CSUM;
 
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
        return features;
 }
 
+static int stmmac_set_features(struct net_device *netdev,
+                              netdev_features_t features)
+{
+       struct stmmac_priv *priv = netdev_priv(netdev);
+
+       /* Keep the COE Type in case csum is supported */
+       if (features & NETIF_F_RXCSUM)
+               priv->hw->rx_csum = priv->plat->rx_coe;
+       else
+               priv->hw->rx_csum = 0;
+       /* No check needed because rx_coe has already been set and will be
+        * fixed up if there is an issue.
+        */
+       priv->hw->mac->rx_ipc(priv->hw);
+
+       return 0;
+}
+
 /**
  *  stmmac_interrupt - main ISR
  *  @irq: interrupt number.
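The new stmmac_set_features() pairs with the existing stmmac_fix_features(): the fix hook strips offload bits the hardware cannot honour, while the set hook reacts when the user flips a bit, here caching the COE type in hw->rx_csum and reprogramming the MAC through rx_ipc(). A reduced sketch of that division of labour, assuming a hypothetical private struct outside the driver:

#include <linux/netdevice.h>

/* Illustrative private state for the sketch. */
struct sketch_priv {
        bool hw_rx_coe;         /* hardware capable of RX checksumming */
        bool rx_csum_on;        /* current user-selected state */
};

static netdev_features_t sketch_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct sketch_priv *priv = netdev_priv(dev);

        if (!priv->hw_rx_coe)           /* advertise only what HW can do */
                features &= ~NETIF_F_RXCSUM;
        return features;
}

static int sketch_set_features(struct net_device *dev,
                               netdev_features_t features)
{
        struct sketch_priv *priv = netdev_priv(dev);

        priv->rx_csum_on = !!(features & NETIF_F_RXCSUM);
        /* a real driver would reprogram the MAC here, as rx_ipc() does */
        return 0;
}

static const struct net_device_ops sketch_netdev_ops = {
        .ndo_fix_features = sketch_fix_features,
        .ndo_set_features = sketch_set_features,
};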
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_stop = stmmac_release,
        .ndo_change_mtu = stmmac_change_mtu,
        .ndo_fix_features = stmmac_fix_features,
+       .ndo_set_features = stmmac_set_features,
        .ndo_set_rx_mode = stmmac_set_rx_mode,
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
  */
 static int stmmac_hw_init(struct stmmac_priv *priv)
 {
-       int ret;
        struct mac_device_info *mac;
 
        /* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
        /* To use alternate (extended) or normal descriptor structures */
        stmmac_selec_desc_mode(priv);
 
-       ret = priv->hw->mac->rx_ipc(priv->hw);
-       if (!ret) {
-               pr_warn(" RX IPC Checksum Offload not configured.\n");
-               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
-       }
-
-       if (priv->plat->rx_coe)
+       if (priv->plat->rx_coe) {
+               priv->hw->rx_csum = priv->plat->rx_coe;
                pr_info(" RX Checksum Offload Engine supported (type %d)\n",
                        priv->plat->rx_coe);
+       }
        if (priv->plat->tx_coe)
                pr_info(" TX Checksum insertion supported\n");
 
@@ -2716,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
        if (IS_ERR(priv->stmmac_clk)) {
                dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
                         __func__);
-               ret = PTR_ERR(priv->stmmac_clk);
-               goto error_clk_get;
+               /* If we fail to obtain stmmac_clk and no specific clk_csr
+                * value is passed from the platform, the probe fails.
+                */
+               if (!priv->plat->clk_csr) {
+                       ret = PTR_ERR(priv->stmmac_clk);
+                       goto error_clk_get;
+               } else {
+                       priv->stmmac_clk = NULL;
+               }
        }
        clk_prepare_enable(priv->stmmac_clk);
 
index b7ad3565566cc8a09b7964fcb59aca3921e8e57c..c5ee79d8a8c56478f9987efc2c0631c140aafc0d 100644 (file)
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
 {
        if (priv->ptp_clock) {
                ptp_clock_unregister(priv->ptp_clock);
+               priv->ptp_clock = NULL;
                pr_debug("Removed PTP HW clock successfully on %s\n",
                         priv->dev->name);
        }
index 3dbc047622fa8bee67ed44213b0f8cd74a4ca956..4535df37c22767824d1f7bbe6db56a8e3d0644ab 100644 (file)
@@ -25,8 +25,6 @@
 #ifndef __STMMAC_PTP_H__
 #define __STMMAC_PTP_H__
 
-#define STMMAC_SYSCLOCK 62500000
-
 /* IEEE 1588 PTP register offsets */
 #define PTP_TCR                0x0700  /* Timestamp Control Reg */
 #define PTP_SSIR       0x0704  /* Sub-Second Increment Reg */
index 23c89ab5a6ada1ade365939dcd7bb28da44ca736..f67539650c380010eced17ea5ab880ec3dc0e328 100644 (file)
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        if (IS_ERR(desc))
                return PTR_ERR(desc);
 
+       if (desc->hdr.state != VIO_DESC_READY)
+               return 1;
+
+       rmb();
+
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
               desc->size, desc->ncookies,
               desc->cookies[0].cookie_addr,
               desc->cookies[0].cookie_size);
 
-       if (desc->hdr.state != VIO_DESC_READY)
-               return 1;
        err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
        if (err == -ECONNRESET)
                return err;
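Hoisting the VIO_DESC_READY check above the debug print and adding rmb() gives the usual producer/consumer ordering: the consumer must observe the descriptor's ready flag before reading the payload fields, and the read barrier keeps those loads from being reordered. A schematic version of the rule; the descriptor layout and handle_payload() below are placeholders, not the driver's API:

#include <asm/barrier.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder descriptor for the sketch; not the sunvnet layout. */
struct ring_desc {
        u8 state;
        u32 len;
        void *data;
};

#define DESC_READY      1

int handle_payload(void *data, u32 len);        /* assumed provided elsewhere */

static int consume_if_ready(struct ring_desc *desc)
{
        if (desc->state != DESC_READY)
                return -EAGAIN;                 /* nothing published yet */

        rmb();  /* order the state read before the payload reads */

        return handle_payload(desc->data, desc->len);
}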
index 999fb72688d226317d0565c936a286386ae6dd98..e2a00287f8eb8a1e86a471851e05c6e46d5aa487 100644 (file)
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
        cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
 
        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+               bool ndev_status = false;
+               struct cpsw_slave *slave = priv->slaves;
+               int n;
+
+               if (priv->data.dual_emac) {
+                       /* In dual emac mode check for all interfaces */
+                       for (n = priv->data.slaves; n; n--, slave++)
+                               if (netif_running(slave->ndev))
+                                       ndev_status = true;
+               }
+
+               if (ndev_status && (status >= 0)) {
+                       /* The received packet is for an interface which
+                        * is already down while the other interface is up
+                        * and running. Instead of freeing the skb, which
+                        * would reduce the number of rx descriptors in the
+                        * DMA engine, requeue it back to cpdma.
+                        */
+                       new_skb = skb;
+                       goto requeue;
+               }
+
                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                new_skb = skb;
        }
 
+requeue:
        ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
                        skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
        struct net_device       *ndev = platform_get_drvdata(pdev);
        struct cpsw_priv        *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_stop(ndev);
+       if (priv->data.dual_emac) {
+               int i;
 
-       for_each_slave(priv, soft_reset_slave);
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_stop(priv->slaves[i].ndev);
+                       soft_reset_slave(priv->slaves + i);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_stop(ndev);
+               for_each_slave(priv, soft_reset_slave);
+       }
 
        pm_runtime_put_sync(&pdev->dev);
 
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
        struct net_device       *ndev = platform_get_drvdata(pdev);
+       struct cpsw_priv        *priv = netdev_priv(ndev);
 
        pm_runtime_get_sync(&pdev->dev);
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_open(ndev);
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_open(priv->slaves[i].ndev);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_open(ndev);
+       }
        return 0;
 }
 
index c1ba26c06d731252771dfe7f2273b724bc675068..3de2f0d15fe24b22c58a912c1502e7513ad6e6cc 100644 (file)
 #define        PCI_MEM64BIT    (2<<1)       /* Base addr anywhere in 64 Bit range */
 #define        PCI_MEMSPACE    0x00000001L  /* Bit 0:  Memory Space Indic. */
 
-/*     PCI_BASE_2ND    32 bit  2nd Base address */
-#define        PCI_IOBASE      0xffffff00L  /* Bit 31..8:  I/O Base address */
-#define        PCI_IOSIZE      0x000000fcL  /* Bit 7..2:   I/O Size Requirements */
-#define        PCI_IOSPACE     0x00000001L  /* Bit 0:      I/O Space Indicator */
-
 /*     PCI_SUB_VID     16 bit  Subsystem Vendor ID */
 /*     PCI_SUB_ID      16 bit  Subsystem ID */
 
index a9c5eaadc426b8cd93e1cacfb65142d306b22f78..0fcb5e7eb073e9eb66b2c17e53675ef87490639a 100644 (file)
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        int  hdr_offset;
        u32 net_trans_info;
        u32 hash;
+       u32 skb_length = skb->len;
 
 
        /* We will at most need two pages to describe the rndis
@@ -562,7 +563,7 @@ do_send:
 
 drop:
        if (ret == 0) {
-               net->stats.tx_bytes += skb->len;
+               net->stats.tx_bytes += skb_length;
                net->stats.tx_packets++;
        } else {
                kfree(packet);
index a96955597755326475b5478b974c59ba02c223c7..726edabff26b710d06b622782a71092430b184b4 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/netpoll.h>
 
 #define MACVLAN_HASH_SIZE      (1 << BITS_PER_BYTE)
+#define MACVLAN_BC_QUEUE_LEN   1000
 
 struct macvlan_port {
        struct net_device       *dev;
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
                goto err;
 
        spin_lock(&port->bc_queue.lock);
-       if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+       if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
                __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }
@@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                             features,
                                             mask);
        features |= ALWAYS_ON_FEATURES;
+       features &= ~NETIF_F_NETNS_LOCAL;
 
        return features;
 }
index 3381c4f91a8cc236df0df8be59bd586538480498..0c6adaaf898c9bf0b783e3bea8957376de983ccd 100644 (file)
@@ -112,17 +112,15 @@ out:
        return err;
 }
 
+/* Requires RTNL */
 static int macvtap_set_queue(struct net_device *dev, struct file *file,
                             struct macvtap_queue *q)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
-       int err = -EBUSY;
 
-       rtnl_lock();
        if (vlan->numqueues == MAX_MACVTAP_QUEUES)
-               goto out;
+               return -EBUSY;
 
-       err = 0;
        rcu_assign_pointer(q->vlan, vlan);
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        sock_hold(&q->sk);
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
        vlan->numvtaps++;
        vlan->numqueues++;
 
-out:
-       rtnl_unlock();
-       return err;
+       return 0;
 }
 
 static int macvtap_disable_queue(struct macvtap_queue *q)
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk)
 static int macvtap_open(struct inode *inode, struct file *file)
 {
        struct net *net = current->nsproxy->net_ns;
-       struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
+       struct net_device *dev;
        struct macvtap_queue *q;
-       int err;
+       int err = -ENODEV;
 
-       err = -ENODEV;
+       rtnl_lock();
+       dev = dev_get_by_macvtap_minor(iminor(inode));
        if (!dev)
                goto out;
 
@@ -498,6 +495,7 @@ out:
        if (dev)
                dev_put(dev);
 
+       rtnl_unlock();
        return err;
 }
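The macvtap changes move RTNL acquisition out to macvtap_open(): the queue-attach helper is now documented as "Requires RTNL" and simply assumes the caller holds it, which also closes the window between dev_get_by_macvtap_minor() and the attach. A generic sketch of that convention, with invented names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Requires RTNL.  Sketch only; names are illustrative. */
static int attach_queue(struct net_device *dev, void *queue)
{
        ASSERT_RTNL();          /* loudly catch callers without the lock */
        /* ... publish the queue; dev cannot go away under RTNL ... */
        return 0;
}

static int open_and_attach(struct net_device *dev, void *queue)
{
        int err;

        rtnl_lock();            /* one lock span covers lookup and attach */
        err = attach_queue(dev, queue);
        rtnl_unlock();
        return err;
}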
 
index fd0ea7c50ee615920c56c94a867287db4e016dbf..011dbda2b2f1571e0e8241eb38ce3fc96f81a986 100644 (file)
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = 0x00fffff0,
        .name           = "Micrel KSZ9031 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
index c94e2a27446a33a685b8b1c17dee4e559c33afd7..a854d38c231dfebc983a3bcd4840fdd5a2257bd2 100644 (file)
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                /* First check if the EEE ability is supported */
                eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
                                                MDIO_MMD_PCS, phydev->addr);
-               if (eee_cap < 0)
-                       return eee_cap;
+               if (eee_cap <= 0)
+                       goto eee_exit_err;
 
                cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
 
                /* Check which link settings were negotiated and verify
                 * them in the EEE advertising registers.
                 */
                eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
                                               MDIO_MMD_AN, phydev->addr);
-               if (eee_lp < 0)
-                       return eee_lp;
+               if (eee_lp <= 0)
+                       goto eee_exit_err;
 
                eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                                MDIO_MMD_AN, phydev->addr);
-               if (eee_adv < 0)
-                       return eee_adv;
+               if (eee_adv <= 0)
+                       goto eee_exit_err;
 
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if (!(lp & adv & settings[idx].setting))
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
 
                if (clk_stop_enable) {
                        /* Configure the PHY to stop receiving xMII
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
                return 0; /* EEE supported */
        }
-
+eee_exit_err:
        return -EPROTONOSUPPORT;
 }
 EXPORT_SYMBOL(phy_init_eee);
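The sign changes above fold two different "no EEE" cases into one path: a negative value from phy_read_mmd_indirect() is a read error, while 0 means the register advertises nothing, and in both cases the function should report -EPROTONOSUPPORT rather than hand the raw value back. The resulting single-exit shape, sketched with a hypothetical register accessor:

#include <linux/errno.h>

int read_eee_cap(void);         /* hypothetical MMD read, assumed elsewhere */

static int sketch_init_eee(void)
{
        int cap = read_eee_cap();

        if (cap <= 0)           /* error and "no capability bits" both bail */
                goto eee_exit_err;

        /* ... compare advertisement with the link partner here ... */
        return 0;

eee_exit_err:
        return -EPROTONOSUPPORT;
}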
index 87f710476217d8a02cc496925d57ac5fcd0021c8..604ef210a4dea5f8e8bd9b543b3dc4156b357286 100644 (file)
@@ -24,7 +24,7 @@
 #include <net/ip6_checksum.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
+#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
 }
 
+static int rtl_start_rx(struct r8152 *tp)
+{
+       int i, ret = 0;
+
+       INIT_LIST_HEAD(&tp->rx_done);
+       for (i = 0; i < RTL8152_MAX_RX; i++) {
+               INIT_LIST_HEAD(&tp->rx_info[i].list);
+               ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int rtl_stop_rx(struct r8152 *tp)
+{
+       int i;
+
+       for (i = 0; i < RTL8152_MAX_RX; i++)
+               usb_kill_urb(tp->rx_info[i].urb);
+
+       return 0;
+}
+
 static int rtl_enable(struct r8152 *tp)
 {
        u32 ocp_data;
-       int i, ret;
 
        r8152b_reset_packet_filter(tp);
 
@@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp)
 
        rxdy_gated_en(tp, false);
 
-       INIT_LIST_HEAD(&tp->rx_done);
-       ret = 0;
-       for (i = 0; i < RTL8152_MAX_RX; i++) {
-               INIT_LIST_HEAD(&tp->rx_info[i].list);
-               ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
-       }
-
-       return ret;
+       return rtl_start_rx(tp);
 }
 
 static int rtl8152_enable(struct r8152 *tp)
@@ -2019,7 +2036,7 @@ static int rtl8153_enable(struct r8152 *tp)
        return rtl_enable(tp);
 }
 
-static void rtl8152_disable(struct r8152 *tp)
+static void rtl_disable(struct r8152 *tp)
 {
        u32 ocp_data;
        int i;
@@ -2053,8 +2070,7 @@ static void rtl8152_disable(struct r8152 *tp)
                mdelay(1);
        }
 
-       for (i = 0; i < RTL8152_MAX_RX; i++)
-               usb_kill_urb(tp->rx_info[i].urb);
+       rtl_stop_rx(tp);
 
        rtl8152_nic_reset(tp);
 }
@@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp)
        }
 }
 
-static void rtl_clear_bp(struct r8152 *tp)
-{
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
-       mdelay(3);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
-       ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
-
-static void r8153_clear_bp(struct r8152 *tp)
-{
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
-       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
-       rtl_clear_bp(tp);
-}
-
 static void r8153_teredo_off(struct r8152 *tp)
 {
        u32 ocp_data;
@@ -2232,6 +2226,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
                                            LINKENA | DIS_SDSAVE);
 }
 
+static void rtl8152_disable(struct r8152 *tp)
+{
+       r8152b_disable_aldps(tp);
+       rtl_disable(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
        u16 data;
@@ -2242,11 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
                r8152_mdio_write(tp, MII_BMCR, data);
        }
 
-       r8152b_disable_aldps(tp);
-
-       rtl_clear_bp(tp);
-
-       r8152b_enable_aldps(tp);
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2255,9 +2251,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data &= ~RCR_ACPT_ALL;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2347,7 +2340,7 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2400,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
                r8152_mdio_write(tp, MII_BMCR, data);
        }
 
-       r8153_clear_bp(tp);
-
        if (tp->version == RTL_VER_03) {
                data = ocp_reg_read(tp, OCP_EEE_CFG);
                data &= ~CTAP_SHORT_EN;
@@ -2485,9 +2476,6 @@ static void r8153_first_init(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        rxdy_gated_en(tp, true);
        r8153_teredo_off(tp);
 
@@ -2560,7 +2548,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2624,6 +2612,13 @@ static void r8153_enable_aldps(struct r8152 *tp)
        ocp_reg_write(tp, OCP_POWER_CFG, data);
 }
 
+static void rtl8153_disable(struct r8152 *tp)
+{
+       r8153_disable_aldps(tp);
+       rtl_disable(tp);
+       r8153_enable_aldps(tp);
+}
+
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 {
        u16 bmcr, anar, gbcr;
@@ -2714,6 +2709,16 @@ out:
        return ret;
 }
 
+static void rtl8152_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8152b_disable_aldps(tp);
+       r8152b_exit_oob(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void rtl8152_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2727,6 +2732,16 @@ static void rtl8152_down(struct r8152 *tp)
        r8152b_enable_aldps(tp);
 }
 
+static void rtl8153_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8153_disable_aldps(tp);
+       r8153_first_init(tp);
+       r8153_enable_aldps(tp);
+}
+
 static void rtl8153_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2946,6 +2961,8 @@ static void r8152b_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8152b_disable_aldps(tp);
+
        if (tp->version == RTL_VER_01) {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
                ocp_data &= ~LED_MODE_MASK;
@@ -2984,6 +3001,7 @@ static void r8153_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8153_disable_aldps(tp);
        r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
@@ -3055,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                cancel_delayed_work_sync(&tp->schedule);
+               tasklet_disable(&tp->tl);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       rtl_stop_rx(tp);
                        rtl_runtime_suspend_enable(tp, true);
                } else {
-                       tasklet_disable(&tp->tl);
                        tp->rtl_ops.down(tp);
-                       tasklet_enable(&tp->tl);
                }
+               tasklet_enable(&tp->tl);
        }
 
        return 0;
@@ -3080,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf)
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_runtime_suspend_enable(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       set_bit(WORK_ENABLE, &tp->flags);
                        if (tp->speed & LINK_STATUS)
-                               tp->rtl_ops.disable(tp);
+                               rtl_start_rx(tp);
                } else {
                        tp->rtl_ops.up(tp);
                        rtl8152_set_speed(tp, AUTONEG_ENABLE,
                                tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
                                DUPLEX_FULL);
+                       tp->speed = 0;
+                       netif_carrier_off(tp->netdev);
+                       set_bit(WORK_ENABLE, &tp->flags);
                }
-               tp->speed = 0;
-               netif_carrier_off(tp->netdev);
-               set_bit(WORK_ENABLE, &tp->flags);
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        }
 
@@ -3377,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
-       r8153_power_cut_en(tp, true);
+       r8153_power_cut_en(tp, false);
 }
 
 static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -3392,7 +3412,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8152b_init;
                        ops->enable             = rtl8152_enable;
                        ops->disable            = rtl8152_disable;
-                       ops->up                 = r8152b_exit_oob;
+                       ops->up                 = rtl8152_up;
                        ops->down               = rtl8152_down;
                        ops->unload             = rtl8152_unload;
                        ret = 0;
@@ -3400,8 +3420,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_RTL8153:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -3416,8 +3436,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_SAMSUNG:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -3530,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 
        usb_set_intfdata(intf, NULL);
        if (tp) {
-               set_bit(RTL8152_UNPLUG, &tp->flags);
+               struct usb_device *udev = tp->udev;
+
+               if (udev->state == USB_STATE_NOTATTACHED)
+                       set_bit(RTL8152_UNPLUG, &tp->flags);
+
                tasklet_kill(&tp->tl);
                unregister_netdev(tp->netdev);
                tp->rtl_ops.unload(tp);
index d6e90c72c257ebe6740d3980dc7273c6b128978d..6dfcbf523936ef69527f5d23168dbf51ee638094 100644 (file)
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev)
                if (!netdev_mc_empty(netdev)) {
                        new_table = vmxnet3_copy_mc(netdev);
                        if (new_table) {
-                               new_mode |= VMXNET3_RXM_MCAST;
                                rxConf->mfTableLen = cpu_to_le16(
                                        netdev_mc_count(netdev) * ETH_ALEN);
                                new_table_pa = dma_map_single(
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev)
                                                        new_table,
                                                        rxConf->mfTableLen,
                                                        PCI_DMA_TODEVICE);
+                       }
+
+                       if (new_table_pa) {
+                               new_mode |= VMXNET3_RXM_MCAST;
                                rxConf->mfTablePA = cpu_to_le64(new_table_pa);
                        } else {
-                               netdev_info(netdev, "failed to copy mcast list"
-                                           ", setting ALL_MULTI\n");
+                               netdev_info(netdev,
+                                           "failed to copy mcast list, setting ALL_MULTI\n");
                                new_mode |= VMXNET3_RXM_ALL_MULTI;
                        }
                }
 
-
        if (!(new_mode & VMXNET3_RXM_MCAST)) {
                rxConf->mfTableLen = 0;
                rxConf->mfTablePA = 0;
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev)
                               VMXNET3_CMD_UPDATE_MAC_FILTERS);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-       if (new_table) {
+       if (new_table_pa)
                dma_unmap_single(&adapter->pdev->dev, new_table_pa,
                                 rxConf->mfTableLen, PCI_DMA_TODEVICE);
-               kfree(new_table);
-       }
+       kfree(new_table);
 }
 
 void
index 29ee77f2c97f3cc0e27e2985751297e3fbf01f90..3759479f959a43b86654e7b493a5d1beb5b6034b 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.2.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.2.1.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01020000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01020100
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index 1fb7b37d1402a447b84298829bab61d4f8b6d129..beb377b2d4b78e67e15fa2c7fe6404bef240eba0 100644 (file)
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
        } else if (vxlan->flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin.sin_addr.s_addr = tip,
-                       .sa.sa_family = AF_INET,
+                       .sin.sin_family = AF_INET,
                };
 
                vxlan_ip_miss(dev, &ipa);
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
        } else if (vxlan->flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin6.sin6_addr = msg->target,
-                       .sa.sa_family = AF_INET6,
+                       .sin6.sin6_family = AF_INET6,
                };
 
                vxlan_ip_miss(dev, &ipa);
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
                if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin.sin_addr.s_addr = pip->daddr,
-                               .sa.sa_family = AF_INET,
+                               .sin.sin_family = AF_INET,
                        };
 
                        vxlan_ip_miss(dev, &ipa);
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
                if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin6.sin6_addr = pip6->daddr,
-                               .sa.sa_family = AF_INET6,
+                               .sin6.sin6_family = AF_INET6,
                        };
 
                        vxlan_ip_miss(dev, &ipa);
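All four vxlan hunks address the same C initializer subtlety: union vxlan_addr was being filled in through two different members (.sin/.sin6 for the address, .sa for the family), and when a designated initializer names a second union member the compiler may discard what the first one stored, leaving the address zeroed. Setting the family through the same member as the address removes the dependence on that behaviour. A small user-space illustration with a reduced union, not the kernel type:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

union addr {
        struct sockaddr     sa;
        struct sockaddr_in  sin;
};

int main(void)
{
        /* Two members of the same union are designated here; depending on
         * the compiler, only the last initializer may take effect, so the
         * address can silently end up as 0.
         */
        union addr risky = {
                .sin.sin_addr.s_addr = 0x01020304,
                .sa.sa_family = AF_INET,
        };

        /* Address and family set through the same member: well defined. */
        union addr safe = {
                .sin.sin_addr.s_addr = 0x01020304,
                .sin.sin_family = AF_INET,
        };

        printf("risky: %#x  safe: %#x\n",
               (unsigned)risky.sin.sin_addr.s_addr,
               (unsigned)safe.sin.sin_addr.s_addr);
        return 0;
}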
index 334c2ece855a92af6576b01ec87cd47af58b8540..da92bfa76b7cf1d37e9ea819edf00c59d84e350b 100644 (file)
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv)
 
        kfree_skb(priv->rx_skb);
 
-       usb_put_dev(priv->udev);
-
        at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
                 __func__);
        ieee80211_free_hw(priv->hw);
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface)
 
        wiphy_info(priv->hw->wiphy, "disconnecting\n");
        at76_delete_device(priv);
+       usb_put_dev(priv->udev);
        dev_info(&interface->dev, "disconnected\n");
 }
 
index 733be5178481e6967141e952a23f348705bae2e4..6ad44470d0f256dfd6900d93f2fd4a342ce9a39d 100644 (file)
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
                                 struct ath9k_beacon_state *bs)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       int dtim_intval, sleepduration;
+       int dtim_intval;
        u64 tsf;
 
        /* No need to configure beacon if we are not associated */
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         * last beacon we received (which may be none).
         */
        dtim_intval = conf->intval * conf->dtim_period;
-       sleepduration = ah->hw->conf.listen_interval * conf->intval;
 
        /*
         * Pull nexttbtt forward to reflect the current
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         */
 
        bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                 sleepduration));
+                                                 conf->intval));
        if (bs->bs_sleepduration > bs->bs_dtimperiod)
                bs->bs_sleepduration = bs->bs_dtimperiod;
 
index bb86eb2ffc953545d759647d70cacc86a4227022..f0484b1b617e9a043e91ad46cc2e042848caa648 100644 (file)
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        struct ath_hw *ah = common->ah;
        struct ath_htc_rx_status *rxstatus;
        struct ath_rx_status rx_stats;
-       bool decrypt_error;
+       bool decrypt_error = false;
 
        if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
                ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
index e6ac8d2e610ca421f60dbfa6eb68dd46f8061c9d..4b148bbb2bf6c40d58d38fd835c75216d9139bac 100644 (file)
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev)
         * touch anything. Note this can happen early
         * on if the IRQ is shared.
         */
-       if (test_bit(ATH_OP_INVALID, &common->op_flags))
+       if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
        /* shared irq, not for us */
index 5fe29b9f8fa26e31b05eaab237ac4830aaf21471..8f68426ca653d93dc58ea990a213c7091a13553a 100644 (file)
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
 
        if (strncmp("trigger", buf, 7) == 0) {
                ath9k_spectral_scan_trigger(sc->hw);
-       } else if (strncmp("background", buf, 9) == 0) {
+       } else if (strncmp("background", buf, 10) == 0) {
                ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
                ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
        } else if (strncmp("chanscan", buf, 8) == 0) {
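The spectral-scan fix is a classic strncmp() length bug: with n = 9 only "backgroun" is compared, so truncated or near-miss input is accepted, while n = 10 covers the whole keyword. A quick user-space check of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *input = "backgroun";        /* one byte short of the keyword */

        printf("n=9 : %s\n",
               strncmp("background", input, 9)  == 0 ? "match" : "no match");
        printf("n=10: %s\n",
               strncmp("background", input, 10) == 0 ? "match" : "no match");
        return 0;
}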
index b8e2561ea6458212c29c847a47e25ddd21b4497d..fe3dc126b149f8fd014f0bd18ff407d80af00003 100644 (file)
@@ -27,10 +27,17 @@ config BRCMFMAC
          one of the bus interface support. If you choose to build a module,
          it'll be called brcmfmac.ko.
 
+config BRCMFMAC_PROTO_BCDC
+       bool
+
+config BRCMFMAC_PROTO_MSGBUF
+       bool
+
 config BRCMFMAC_SDIO
        bool "SDIO bus interface support for FullMAC driver"
        depends on (MMC = y || MMC = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        default y
        ---help---
@@ -42,6 +49,7 @@ config BRCMFMAC_USB
        bool "USB bus interface support for FullMAC driver"
        depends on (USB = y || USB = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        ---help---
          This option enables the USB bus interface support for Broadcom
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
+       depends on HAS_DMA
+       select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
          This option enables the PCIE bus interface support for Broadcom
index c35adf4bc70b72e584969b6bfd4bae251d21b7fe..90a977fe9a648d0f34eda15bbcba305eac6d35c0 100644 (file)
@@ -30,16 +30,18 @@ brcmfmac-objs += \
                fwsignal.o \
                p2p.o \
                proto.o \
-               bcdc.o \
-               commonring.o \
-               flowring.o \
-               msgbuf.o \
                dhd_common.o \
                dhd_linux.o \
                firmware.o \
                feature.o \
                btcoex.o \
                vendor.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
+               bcdc.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
+               commonring.o \
+               flowring.o \
+               msgbuf.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
                dhd_sdio.o \
                bcmsdh.o
index 17e8c039ff321daa6e161644d481b92b33189871..6003179c0ceb286ce2a396d96eb1f436d727b80f 100644 (file)
 #ifndef BRCMFMAC_BCDC_H
 #define BRCMFMAC_BCDC_H
 
-
+#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
-
+#else
+static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_BCDC_H */
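With the new hidden Kconfig symbols, the BCDC (and, in the next file, MSGBUF) entry points compile to static inline no-ops whenever the protocol is not selected, so common code can call them unconditionally. The general shape of that pattern, using invented option and function names:

/* Sketch of Kconfig-gated stubs; the SKETCH_* names are illustrative. */
#ifndef SKETCH_PROTO_H
#define SKETCH_PROTO_H

struct sketch_pub;

#ifdef CONFIG_SKETCH_PROTO
int sketch_proto_attach(struct sketch_pub *pub);
void sketch_proto_detach(struct sketch_pub *pub);
#else
/* Disabled in Kconfig: callers still compile, and the calls vanish. */
static inline int sketch_proto_attach(struct sketch_pub *pub) { return 0; }
static inline void sketch_proto_detach(struct sketch_pub *pub) { }
#endif

#endif /* SKETCH_PROTO_H */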
index 4f1daabc551b0824c4f72bc22faed58fd47a67d5..44fc85f68f7aceba2788da99126164b0a946a6a7 100644 (file)
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                  ifevent->action, ifevent->ifidx, ifevent->bssidx,
                  ifevent->flags, ifevent->role);
 
-       if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+       /* The P2P Device interface event must not be ignored
+        * contrary to what firmware tells us. The only way to
+        * distinguish the P2P Device is by looking at the ifidx
+        * and bssidx received.
+        */
+       if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
+           (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
                brcmf_dbg(EVENT, "event can be ignored\n");
                return;
        }
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                                return;
        }
 
-       if (ifevent->action == BRCMF_E_IF_CHANGE)
+       if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
                brcmf_fws_reset_interface(ifp);
 
        err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
-       if (ifevent->action == BRCMF_E_IF_DEL) {
+       if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
                brcmf_fws_del_interface(ifp);
                brcmf_del_if(drvr, ifevent->bssidx);
        }
index dd20b1862d44b94121076e6287e4f704c9f1daee..cbf033f59109db924e824c18983cd511d1aab949 100644 (file)
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code {
 #define BRCMF_E_IF_ROLE_STA                    0
 #define BRCMF_E_IF_ROLE_AP                     1
 #define BRCMF_E_IF_ROLE_WDS                    2
+#define BRCMF_E_IF_ROLE_P2P_GO                 3
+#define BRCMF_E_IF_ROLE_P2P_CLIENT             4
 
 /**
  * definitions for event packet validation.
index f901ae52bf2b4174db4fad2620b029d10d6b1707..77a51b8c1e120824a4eb6d0b336d3875d3c16fe5 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef BRCMFMAC_MSGBUF_H
 #define BRCMFMAC_MSGBUF_H
 
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      20
 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       256
 
 
 int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
-
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+       return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_MSGBUF_H */
index 02fe706fc9ec8cbeed88baacc377c33e6892dd6d..16a246bfc34380dbaa8329e7675583f5269b26d1 100644 (file)
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
 static void
 brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
 {
-       struct net_device *ndev = wdev->netdev;
-       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_if *ifp;
+
+       vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+       ifp = vif->ifp;
 
        if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
            (wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
        struct brcmu_chan ch;
        int i;
 
-       for (i = 0; i <= total; i++) {
+       for (i = 0; i < total; i++) {
                ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
                cfg->d11inf.decchspec(&ch);
 
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                ch.band = BRCMU_CHAN_BAND_2G;
                ch.bw = BRCMU_CHAN_BW_40;
+               ch.sb = BRCMU_CHAN_SB_NONE;
                ch.chnum = 0;
                cfg->d11inf.encchspec(&ch);
 
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                        brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
                }
+               kfree(pbuf);
        }
        return err;
 }
index 6451d2b6abcff3c33f486c9d7eb90390647a1266..824f5e2877835d8a829f643d8b15cf1ed85e2572 100644 (file)
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS
 
 config IWLDVM
        tristate "Intel Wireless WiFi DVM Firmware support"
-       depends on m
        default IWLWIFI
        help
          This is the driver that supports the DVM firmware which is
@@ -60,7 +59,6 @@ config IWLDVM
 
 config IWLMVM
        tristate "Intel Wireless WiFi MVM Firmware support"
-       depends on m
        help
          This is the driver that supports the MVM firmware which is
          currently only available for 7260 and 3160 devices.
index 760c45c34ef36ea18a90c7762afac5c602796d3d..1513dbc79c148e924b29b430e7b850e0f7935ebe 100644 (file)
@@ -40,7 +40,7 @@
 #include "commands.h"
 #include "power.h"
 
-static bool force_cam;
+static bool force_cam = true;
 module_param(force_cam, bool, 0644);
 MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
 
index 6dc5dd3ced44723943934f114c6fde78c98a565f..ed50de6362ed1d5dcd56b45243ff0b140dcbafe5 100644 (file)
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* recalculate basic rates */
        iwl_calc_basic_rates(priv, ctx);
 
+       /*
+        * force CTS-to-self frame protection if RTS-CTS is not the
+        * preferred aggregation protection method
+        */
+       if (!priv->hw_params.use_rts_for_aggregation)
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
        else
                ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 
+       if (bss_conf->use_cts_prot)
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
        memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
        if (vif->type == NL80211_IFTYPE_AP ||
index 48730064da73f5e1e058f756d9473a2a0a5bc376..d53adc245497cf29940374510c933a40a4704f77 100644 (file)
@@ -67,8 +67,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  9
-#define IWL3160_UCODE_API_MAX  9
+#define IWL7260_UCODE_API_MAX  10
+#define IWL3160_UCODE_API_MAX  10
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   9
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3160_NVM_VERSION            0x709
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3165_NVM_VERSION            0x709
+#define IWL3165_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265_NVM_VERSION            0x0a1d
 #define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 
@@ -92,6 +94,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3165_FW_PRE "iwlwifi-3165-"
+#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
        {0},
 };
 
+const struct iwl_cfg iwl3165_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 3165",
+       .fw_name_pre = IWL3165_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3165_NVM_VERSION,
+       .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
 const struct iwl_cfg iwl7265_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 7265",
        .fw_name_pre = IWL7265_FW_PRE,
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 44b19e015102096e39fd086539794ab608c4c359..e93c6972290b84de3cf26214ca6894d0db0c4f79 100644 (file)
@@ -67,7 +67,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  9
+#define IWL8000_UCODE_API_MAX  10
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   8
index 8da596db9abe9cd308132c8577d10fabc6be09bc..3d7cc37420ae9d5b052e2f39de6c2705a8780a81 100644 (file)
@@ -120,6 +120,8 @@ enum iwl_led_mode {
 #define IWL_LONG_WD_TIMEOUT    10000
 #define IWL_MAX_WD_TIMEOUT     120000
 
+#define IWL_DEFAULT_MAX_TX_POWER 22
+
 /* Antenna presence definitions */
 #define        ANT_NONE        0x0
 #define        ANT_A           BIT(0)
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl3165_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
index 018af2957d3b7b319c47222eae4d1dfe1cdc3891..354255f087544ea455b708ef853bdb7f1a0fbb72 100644 (file)
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define LAST_2GHZ_HT_PLUS              9
 #define LAST_5GHZ_HT                   161
 
-#define DEFAULT_MAX_TX_POWER 16
-
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
        { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 * Default value - highest tx power value.  max_power
                 * is not used in mvm, and is used for backwards compatibility
                 */
-               channel->max_power = DEFAULT_MAX_TX_POWER;
+               channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
index 2291bbcaaeab8bfe57c3aa20f9409bab41ba4cef..ce71625f497ff8f34384b803121b9312aa35afde 100644 (file)
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-               u32 mode;
-
                switch (mvm->bt_force_ant_mode) {
                case BT_FORCE_ANT_BT:
                        mode = BT_COEX_BT;
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        struct iwl_bt_iterator_data *data = _data;
        struct iwl_mvm *mvm = data->mvm;
        struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
+       /* default smps_mode is AUTOMATIC - only used for client modes */
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
        u32 bt_activity_grading;
        int ave_rssi;
 
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* default smps_mode for BSS / P2P client is AUTOMATIC */
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
                break;
        case NL80211_IFTYPE_AP:
                if (!mvmvif->ap_ibss_active)
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        else if (bt_activity_grading >= BT_LOW_TRAFFIC)
                smps_mode = IEEE80211_SMPS_DYNAMIC;
 
-       /* relax SMPS contraints for next association */
+       /* relax SMPS constraints for next association */
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
index 2e90ff795c13212d6d8ca7be35df935a907d6b3c..87e517bffedc6647a0b1e9fbc27758647ea6907e 100644 (file)
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 
        switch (param) {
        case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-               struct ieee80211_hw *hw = mvm->hw;
-               int dtimper = hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
index 95f5b3274efb516f280eee74eb8f9649c51d599a..9a922f3bd16bc00b7a5ce0e6484178db9372a24d 100644 (file)
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario {
 
 /**
  * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in iwl_sf_sate.
+ * @state: smart fifo state, types listed in enum %iwl_sf_state.
  * @watermark: Minimum allowed available free space in RXF for transient state.
  * @long_delay_timeouts: aging and idle timer values for each scenario
  * in long delay state.
  * @full_on_timeouts: timer values for each scenario in full on state.
  */
 struct iwl_sf_cfg_cmd {
-       enum iwl_sf_state state;
+       __le32 state;
        __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
        __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
        __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
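Switching the field from enum iwl_sf_state to __le32 (with the matching cpu_to_le32() conversion added in the sf.c hunk further down) is about wire-format stability: this struct is handed to the firmware as-is, an enum's storage size is implementation-defined, and the host may be big-endian, so fixed-width little-endian types plus explicit conversion pin down both width and byte order. A reduced sketch of the convention; the layout and names are illustrative:

#include <asm/byteorder.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Host->firmware command sketch; every field is explicitly sized/ordered. */
struct sketch_fw_cmd {
        __le32 state;
        __le32 watermark;
} __packed;

static void sketch_fill_cmd(struct sketch_fw_cmd *cmd, u32 state, u32 mark)
{
        cmd->state = cpu_to_le32(state);        /* byte order fixed here, */
        cmd->watermark = cpu_to_le32(mark);     /* not left to the host CPU */
}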
index 0e523e28cabfc15b8beb63af3659cdc20d30b401..8242e689ddb114af0a2e17a0d5ce266c19392535 100644 (file)
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
            !force_assoc_off) {
                u32 dtim_offs;
 
-               /* Allow beacons to pass through as long as we are not
-                * associated, or we do not have dtim period information.
-                */
-               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-
                /*
                 * The DTIM count counts down, so when it is N that means N
                 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
                ctxt_sta->is_assoc = cpu_to_le32(1);
        } else {
                ctxt_sta->is_assoc = cpu_to_le32(0);
+
+               /* Allow beacons to pass through as long as we are not
+                * associated, or we do not have dtim period information.
+                */
+               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
        }
 
        ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
index 7c8796584c253d322291ec02cd177539a3869a14..cdc272d776e7f298baa6c71dc156ce576021e151 100644 (file)
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       /* TODO: enable that only for firmwares that don't crash */
-       /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
-       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-       /* we create the 802.11 header and zero length SSID IE. */
-       hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
+               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+               /* we create the 802.11 header and zero length SSID IE. */
+               hw->wiphy->max_sched_scan_ie_len =
+                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       }
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
-       } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
-                             BSS_CHANGED_QOS)) {
-               ret = iwl_mvm_power_update_mac(mvm);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update power mode\n");
        }
 
        if (changes & BSS_CHANGED_BEACON_INFO) {
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
        }
 
+       if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+               ret = iwl_mvm_power_update_mac(mvm);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update power mode\n");
+       }
+
        if (changes & BSS_CHANGED_TXPOWER) {
                IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
                                bss_conf->txpower);
index 2b2d10800a55e1b90f3f4da4c91bd6cfe09629a8..d9769a23c68b44e9b16e3ce6bddd7aa191c45e27 100644 (file)
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
 {
-       struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
        int dtimper, dtimper_msec;
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
-       dtimper = hw->conf.ps_dtim_period ?: 1;
+       dtimper = vif->bss_conf.dtim_period;
 
        /*
         * Regardless of power management state the driver must set
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        if (enable) {
                /* configure skip over dtim up to 300 msec */
-               int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                if (WARN_ON(!dtimper_msec))
index 4b98987fc4133f6b7d7c0a21a1d11e57658a35f7..bf5cd8c8b0f798d2a0cebf24236f0730304e6b4a 100644 (file)
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
            le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
        energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_A_POS;
-       energy_a = energy_a ? -energy_a : -256;
+       energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_B_POS;
-       energy_b = energy_b ? -energy_b : -256;
+       energy_b = energy_b ? -energy_b : S8_MIN;
        energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_C_POS;
-       energy_c = energy_c ? -energy_c : -256;
+       energy_c = energy_c ? -energy_c : S8_MIN;
        max_energy = max(energy_a, energy_b);
        max_energy = max(max_energy, energy_c);
 
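The rx.c hunk above swaps the out-of-range sentinel -256 for S8_MIN, because the per-antenna energy ends up in a signed 8-bit field. A minimal userspace sketch of the same clamping follows; the two-antenna register layout and the ENERGY_ANT_* masks are invented for illustration, not the driver's real ones.

#include <stdint.h>
#include <stdio.h>

/* Invented two-antenna field layout; the driver's real masks differ. */
#define ENERGY_ANT_A_MSK 0x000000ffu
#define ENERGY_ANT_A_POS 0
#define ENERGY_ANT_B_MSK 0x0000ff00u
#define ENERGY_ANT_B_POS 8

static int ant_energy_dbm(uint32_t val, uint32_t msk, unsigned int pos)
{
    int energy = (val & msk) >> pos;

    /* 0 means "no reading"; INT8_MIN is the lowest value that still fits
     * in the signed 8-bit signal field, unlike the old -256 sentinel. */
    return energy ? -energy : INT8_MIN;
}

int main(void)
{
    uint32_t val = 0x00002a00; /* antenna B reads 0x2a, A reads 0 */
    int a = ant_energy_dbm(val, ENERGY_ANT_A_MSK, ENERGY_ANT_A_POS);
    int b = ant_energy_dbm(val, ENERGY_ANT_B_MSK, ENERGY_ANT_B_POS);

    printf("a=%d dBm, b=%d dBm, max=%d dBm\n", a, b, a > b ? a : b);
    return 0;
}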
index 7edfd15efc9d001f227ea2c35b046c0f47cb55af..e843b67f2201f5c3159747c4128d08b1c28638a8 100644 (file)
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                             enum iwl_sf_state new_state)
 {
        struct iwl_sf_cfg_cmd sf_cmd = {
-               .state = new_state,
+               .state = cpu_to_le32(new_state),
        };
        struct ieee80211_sta *sta;
        int ret = 0;
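The one-line sf.c fix above byte-swaps the state value before it is placed in the firmware command, since the device expects little-endian fields regardless of host endianness. A standalone sketch of the same conversion, using glibc's htole32() in place of the kernel's cpu_to_le32(); the struct layout is invented for illustration.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Invented wire format: the device always expects little-endian fields. */
struct sf_cfg_cmd {
    uint32_t state; /* "__le32" in kernel terms */
};

int main(void)
{
    uint32_t new_state = 2;  /* some enum value */
    struct sf_cfg_cmd cmd = {
        .state = htole32(new_state), /* cpu_to_le32() equivalent */
    };
    const uint8_t *p = (const uint8_t *)&cmd.state;

    /* prints 02 00 00 00 on any host, which is what the firmware parses */
    printf("on-wire bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
    return 0;
}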
index dbc870713882937c381a90ea2a1c31a2a9f60af8..9ee410bf6da243ced2b65bf61457eecbe3634916 100644 (file)
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
        /*
         * for data packets, rate info comes from the table inside the fw. This
-        * table is controlled by LINK_QUALITY commands
+        * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+        * frames like EAPOLs which should be treated as mgmt frames. This
+        * avoids them being sent initially in high rates which increases the
+        * chances for completion of the 4-Way handshake.
         */
 
-       if (ieee80211_is_data(fc) && sta) {
+       if (ieee80211_is_data(fc) && sta &&
+           !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
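The expanded comment in the tx.c hunk explains why control-port (EAPOL) frames are kept out of the firmware rate table: sending them at the low initial rate makes the 4-way handshake more likely to complete. A rough userspace model of that classification; only the flag name mirrors mac80211's IEEE80211_TX_CTRL_PORT_CTRL_PROTO, everything else here is invented.

#include <stdbool.h>
#include <stdio.h>

#define TX_CTRL_PORT_CTRL_PROTO (1u << 0) /* frame carries EAPOL/802.1X */

struct tx_frame {
    bool is_data;            /* 802.11 data frame? */
    bool has_sta;            /* destined to a known station? */
    unsigned int ctrl_flags;
};

/* True when the firmware's per-station rate table may be used; control-port
 * frames fall through to the slower, more reliable management-style path. */
static bool use_station_rate_table(const struct tx_frame *f)
{
    return f->is_data && f->has_sta &&
           !(f->ctrl_flags & TX_CTRL_PORT_CTRL_PROTO);
}

int main(void)
{
    struct tx_frame eapol = { true, true, TX_CTRL_PORT_CTRL_PROTO };
    struct tx_frame bulk  = { true, true, 0 };

    printf("EAPOL uses rate table:     %d\n", use_station_rate_table(&eapol));
    printf("bulk data uses rate table: %d\n", use_station_rate_table(&bulk));
    return 0;
}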
index f0e722ced08032511514840bb9d13f5485000b2f..073a68b97a7224233c9c4a3a18af4f0f4f3afecd 100644 (file)
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
 
+/* 3165 Series */
+       {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
index 33da3dfcfa4f05f112e4271487c9f5131bb3f20a..d4bd550f505c7dcfa59bc60f4c852a5cf6aed39e 100644 (file)
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter)
 
        bool is_legacy = false;
 
-       if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
+       if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
                is_legacy = true;
 
        return is_legacy;
index 361435f8608a125ca4cfdfd38b1bfa6212215ec9..1ac6383e79471f5944e4267f3ae8bc28715c53e2 100644 (file)
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
        {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
index e29e15dca86ee3b7d1efc6d84a2607bcd173b449..f379689dde309b7bf63eb511ef7f28a827f71fc5 100644 (file)
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);
 
+       netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+                       XENVIF_NAPI_WEIGHT);
+
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);
 
-       netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-                       XENVIF_NAPI_WEIGHT);
-
        return 0;
 
 err_rx_unbind:
index f868333271aafc665d45f7e3b1ec6dfcdbf35246..963a4a5dc88e5aa2e13d814b5731fb1f67f74102 100644 (file)
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
-                      skb->data[MICROREAD_EMCF_A_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_A_3:
                targets->supported_protocols =
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
-                      skb->data[MICROREAD_EMCF_A3_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_B:
                targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
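Both microread hunks apply the same hardening: read the length byte first, reject it if it would overflow the destination array, and only then memcpy. A self-contained version of that pattern; the offsets and sizes are illustrative stand-ins for the real MICROREAD_EMCF_* constants.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFCID1_MAX 10 /* size of the destination field */
#define OFF_LEN    0  /* invented offsets into the event payload */
#define OFF_UID    1

static int copy_nfcid(uint8_t *dst, size_t dst_sz,
                      const uint8_t *payload, size_t payload_len)
{
    uint8_t len;

    if (payload_len <= OFF_UID)
        return -EINVAL;

    len = payload[OFF_LEN];
    /* validate before copying: a bogus length must not overflow dst */
    if (len > dst_sz || len > payload_len - OFF_UID)
        return -EINVAL;

    memcpy(dst, &payload[OFF_UID], len);
    return len;
}

int main(void)
{
    uint8_t uid[NFCID1_MAX];
    const uint8_t good[] = { 4, 0xde, 0xad, 0xbe, 0xef };
    const uint8_t evil[] = { 200, 0x00 }; /* lies about its length */

    printf("good: %d\n", copy_nfcid(uid, sizeof(uid), good, sizeof(good)));
    printf("evil: %d\n", copy_nfcid(uid, sizeof(uid), evil, sizeof(evil)));
    return 0;
}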
index db7a38ae05f7a5cee13241b43ae1a675dc79ab1f..7d688f97aa278311731f7e5d8a96af9b974cb253 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for ST21NFCA HCI based NFC driver
 #
 
-st21nfca_i2c-objs  = i2c.o
+st21nfca_hci-objs = st21nfca.o st21nfca_dep.o
+obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca_hci.o
 
-obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca.o st21nfca_dep.o
+st21nfca_i2c-objs  = i2c.o
 obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
index 13d9f03b2feaa1e3603b0d2ab5e10404afdd2551..f4d835dd15f24230afaf995496146b8a520b0cfb 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for ST21NFCB NCI based NFC driver
 #
 
-st21nfcb_i2c-objs  = i2c.o
+st21nfcb_nci-objs = ndlc.o st21nfcb.o
+obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb_nci.o
 
-obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb.o ndlc.o
+st21nfcb_i2c-objs = i2c.o
 obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
index 9dd63b82202555e0ebdd46b3460a8ec1107f8f9f..e9bf2f47b61ada12a6730990c433d57a71ce90ce 100644 (file)
@@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        WARN_ON(nt->mw[mw_num].virt_addr == NULL);
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
@@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
                return -ENOMEM;
        }
 
+       /*
+        * we must ensure that the memory address allocated is BAR size
+        * aligned in order for the XLAT register to take the value. This
+        * is a requirement of the hardware. It is recommended to setup CMA
+        * for BAR sizes equal or greater than 4MB.
+        */
+       if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
+               dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+                       &mw->dma_addr);
+               ntb_free_mw(nt, num_mw);
+               return -ENOMEM;
+       }
+
        /* Notify HW the memory location of the receive buffer */
        ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
 
@@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        qp->client_ready = NTB_LINK_DOWN;
        qp->event_handler = NULL;
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
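The new ntb_transport check above rejects receive buffers whose DMA address is not aligned to the BAR size, because the XLAT register can only translate such addresses. For a power-of-two BAR size, IS_ALIGNED() reduces to a mask test, as in this standalone sketch; the 4 MB size and sample addresses are just examples.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Kernel-style IS_ALIGNED(): only valid when size is a power of two. */
static bool is_aligned(uint64_t addr, uint64_t size)
{
    return (addr & (size - 1)) == 0;
}

int main(void)
{
    uint64_t bar_size = 4ULL << 20; /* 4 MB BAR */

    printf("%d\n", is_aligned(0x100400000ULL, bar_size)); /* 1: usable   */
    printf("%d\n", is_aligned(0x100410000ULL, bar_size)); /* 0: rejected */
    return 0;
}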
index d8574adf0d62d446c02011970096e74c921ec6f1..293ed4b687ba7265889002edcb4f01b7d7edfe21 100644 (file)
@@ -138,6 +138,9 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
        /* Important: Don't leak passwords */
        bool secure = strncmp(pp->name, "security-", 9) == 0;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return 0;
+
        if (!of_kset || !of_node_is_attached(np))
                return 0;
 
@@ -158,6 +161,9 @@ int __of_attach_node_sysfs(struct device_node *np)
        struct property *pp;
        int rc;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return 0;
+
        if (!of_kset)
                return 0;
 
@@ -1713,6 +1719,9 @@ int __of_remove_property(struct device_node *np, struct property *prop)
 
 void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
 {
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        /* at early boot, bail here and defer setup to of_init() */
        if (of_kset && of_node_is_attached(np))
                sysfs_remove_bin_file(&np->kobj, &prop->attr);
@@ -1777,6 +1786,9 @@ int __of_update_property(struct device_node *np, struct property *newprop,
 void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
                struct property *oldprop)
 {
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        /* At early boot, bail out and defer setup to of_init() */
        if (!of_kset)
                return;
@@ -1847,6 +1859,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
 {
        struct property *pp;
 
+       of_aliases = of_find_node_by_path("/aliases");
        of_chosen = of_find_node_by_path("/chosen");
        if (of_chosen == NULL)
                of_chosen = of_find_node_by_path("/chosen@0");
@@ -1862,7 +1875,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
                        of_stdout = of_find_node_by_path(name);
        }
 
-       of_aliases = of_find_node_by_path("/aliases");
        if (!of_aliases)
                return;
 
@@ -1986,7 +1998,7 @@ bool of_console_check(struct device_node *dn, char *name, int index)
 {
        if (!dn || dn != of_stdout || console_set_on_cmdline)
                return false;
-       return add_preferred_console(name, index, NULL);
+       return !add_preferred_console(name, index, NULL);
 }
 EXPORT_SYMBOL_GPL(of_console_check);
 
index 54fecc49a1fe4bb702576590714798799b777a65..f297891d852908ae3e78fe9bfa2bfe5b231875e6 100644 (file)
@@ -45,6 +45,9 @@ void __of_detach_node_sysfs(struct device_node *np)
 {
        struct property *pp;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        BUG_ON(!of_node_is_initialized(np));
        if (!of_kset)
                return;
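The hunks in the two files above add the same early bail-out to every sysfs helper so that kernels built without CONFIG_SYSFS skip the work, relying on IS_ENABLED() to let the compiler discard the rest of each function as dead code. A deliberately simplified standalone illustration of the idiom; the real kernel macro also copes with undefined symbols and tristate "=m" options.

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED() machinery. */
#define CONFIG_SYSFS 0           /* flip to 1 to enable the code path */
#define IS_ENABLED(option) (option)

static int attach_node_sysfs(const char *path)
{
    if (!IS_ENABLED(CONFIG_SYSFS))
        return 0; /* compiler drops everything below as dead code */

    printf("creating sysfs entry for %s\n", path);
    return 0;
}

int main(void)
{
    return attach_node_sysfs("/chosen");
}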
index 79cb8313c7d8b0b86681bc75867e8b6e5ee746ef..d1ffca8b34eac51853c812a230cfd438862f8cff 100644 (file)
@@ -928,7 +928,11 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
        const u64 phys_offset = __pa(PAGE_OFFSET);
-       base &= PAGE_MASK;
+
+       if (!PAGE_ALIGNED(base)) {
+               size -= PAGE_SIZE - (base & ~PAGE_MASK);
+               base = PAGE_ALIGN(base);
+       }
        size &= PAGE_MASK;
 
        if (base > MAX_PHYS_ADDR) {
@@ -937,10 +941,10 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
                return;
        }
 
-       if (base + size > MAX_PHYS_ADDR) {
-               pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
-                               ULONG_MAX, base + size);
-               size = MAX_PHYS_ADDR - base;
+       if (base + size - 1 > MAX_PHYS_ADDR) {
+               pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+                               ((u64)MAX_PHYS_ADDR) + 1, base + size);
+               size = MAX_PHYS_ADDR - base + 1;
        }
 
        if (base + size < phys_offset) {
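The fdt.c hunk stops masking a misaligned base downward: it rounds the base up to the next page, shrinks the size by the skipped bytes, and clamps against MAX_PHYS_ADDR using base + size - 1 so the top page of the range is not lost to an off-by-one. A standalone sketch of that arithmetic with an illustrative 4 KiB page size and a 32-bit physical limit.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define MAX_PHYS_ADDR 0xffffffffULL /* illustrative 32-bit physical limit */

static void add_memory(uint64_t base, uint64_t size)
{
    if (base & ~PAGE_MASK) {                         /* base not page aligned */
        size -= PAGE_SIZE - (base & ~PAGE_MASK);     /* drop the partial page */
        base  = (base + PAGE_SIZE - 1) & PAGE_MASK;
    }
    size &= PAGE_MASK;

    if (base > MAX_PHYS_ADDR)
        return;                                      /* nothing usable */

    if (base + size - 1 > MAX_PHYS_ADDR)             /* clamp, end-inclusive */
        size = MAX_PHYS_ADDR - base + 1;

    printf("usable range: 0x%" PRIx64 " - 0x%" PRIx64 "\n",
           base, base + size - 1);
}

int main(void)
{
    add_memory(0x80000100ULL, 0x90000000ULL); /* misaligned and too large */
    return 0;
}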
index 9eae9834bcc7a1ee02ea01c8debb40882f416db9..a0580afe1713a5f58db5e96da635028856200a08 100644 (file)
@@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev)
        printk("%s version %s found at 0x%lx\n", name, version, hpa);
 
        if (!request_mem_region(hpa, PAGE_SIZE, name)) {
-               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
+               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n",
                        hpa);
                return 1;
        }
index 0f54ab6260dfdcd719542e2a420db5b568b15ba4..3651c3871d5b45f4afc735709503aa315139c362 100644 (file)
@@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
 {
        struct hardware_path hwpath;
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
        struct device *dev;
        int ret;
 
@@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0xff is a blank pattern */
        memset(&hwpath, 0xff, sizeof(hwpath));
@@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
 {
        unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
 
        if (!entry || !buf || !count)
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0 is a blank pattern */
        memset(&layers, 0, sizeof(layers));
@@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
 {
        struct pdcspath_entry *pathentry;
        unsigned char flags;
-       char in[count+1], *temp;
+       char in[8], *temp;
        char c;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
 
        /* Current flags are stored in primary boot path entry */
        pathentry = &pdcspath_entry_primary;
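All three pdc_stable hunks replace a variable-length array sized by the user-controlled write count with a fixed on-stack buffer, truncating the input and NUL-terminating it explicitly. A minimal userspace rendition of the pattern; the buffer size and sample inputs are arbitrary.

#include <stdio.h>
#include <string.h>

/* Copy at most sizeof(in)-1 bytes of a user-sized buffer into a fixed local
 * array and terminate it explicitly, instead of allocating a variable-length
 * array from the (attacker-controlled) count. */
static void parse_user_write(const char *buf, size_t count)
{
    char in[64];

    if (count > sizeof(in) - 1)
        count = sizeof(in) - 1;
    strncpy(in, buf, count);
    in[count] = '\0';

    printf("kept %zu bytes: \"%s\"\n", count, in);
}

int main(void)
{
    char big[200];

    memset(big, 'A', sizeof(big));      /* deliberately unterminated */
    parse_user_write("8/0/1/0", 7);
    parse_user_write(big, sizeof(big)); /* silently truncated to 63 bytes */
    return 0;
}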
index a042d065a0c757bdee271718808c349c6a07898e..8be2096c842390f5ecefc24186267e9ce4f61e2b 100644 (file)
@@ -395,7 +395,8 @@ static void __init superio_serial_init(void)
        serial_port.iotype      = UPIO_PORT;
        serial_port.type        = PORT_16550A;
        serial_port.uartclk     = 115200*16;
-       serial_port.fifosize    = 16;
+       serial_port.flags       = UPF_FIXED_PORT | UPF_FIXED_TYPE |
+                                 UPF_BOOT_AUTOCONF;
 
        /* serial port #1 */
        serial_port.iobase      = sio_dev.sp1_base;
index a568efaa331c57cc35144b08fe3f3b386118c926..35fc73a8d0b3d0cff77f13c280d17549a57c1128 100644 (file)
@@ -49,6 +49,9 @@ struct imx6_pcie {
 
 /* PCIe Port Logic registers (memory-mapped) */
 #define PL_OFFSET 0x700
+#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK           (0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK                        (1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
 #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING        (1 << 29)
@@ -214,6 +217,32 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
 {
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+       u32 val, gpr1, gpr12;
+
+       /*
+        * If the bootloader already enabled the link we need some special
+        * handling to get the core back into a state where it is safe to
+        * touch it for configuration.  As there is no dedicated reset signal
+        * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+        * state before completely disabling LTSSM, which is a prerequisite
+        * for core configuration.
+        *
+        * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+        * indication that the bootloader activated the link.
+        */
+       regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+       regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+       if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+           (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+               val = readl(pp->dbi_base + PCIE_PL_PFLR);
+               val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+               val |= PCIE_PL_PFLR_FORCE_LINK;
+               writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+               regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                               IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+       }
 
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
@@ -589,6 +618,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
        return 0;
 }
 
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+       struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+       /* bring down link, so bootloader gets clean state in case of reboot */
+       imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
 static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", },
        {},
@@ -601,6 +638,7 @@ static struct platform_driver imx6_pcie_driver = {
                .owner  = THIS_MODULE,
                .of_match_table = imx6_pcie_of_match,
        },
+       .shutdown = imx6_pcie_shutdown,
 };
 
 /* Freescale PCIe driver does not allow module unload */
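The long comment in the imx6 reset hunk describes the recovery path when the bootloader already brought the link up: force the LTSSM into "detect" via the port-logic PFLR register, then drop the LTSSM-enable bit. Stripped of the regmap and MMIO plumbing it is a plain read-modify-write; a toy sketch on an ordinary variable, with the mask and bit names taken from the hunk and the starting value invented.

#include <stdint.h>
#include <stdio.h>

#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK      (1 << 15)

int main(void)
{
    uint32_t pflr = 0x00170000; /* pretend the link is currently up */

    pflr &= ~PCIE_PL_PFLR_LINK_STATE_MASK; /* clear the link state field */
    pflr |= PCIE_PL_PFLR_FORCE_LINK;       /* push LTSSM back to "detect" */

    printf("PFLR value to write back: 0x%08x\n", pflr);
    return 0;
}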
index 70741c8c46a0cae82069ea1eba5ffc8e98a3d834..6cd5160fc057654a20e2630e438513028a5bab39 100644 (file)
@@ -560,19 +560,15 @@ static void disable_slot(struct acpiphp_slot *slot)
        slot->flags &= (~SLOT_ENABLED);
 }
 
-static bool acpiphp_no_hotplug(struct acpi_device *adev)
-{
-       return adev && adev->flags.no_hotplug;
-}
-
 static bool slot_no_hotplug(struct acpiphp_slot *slot)
 {
-       struct acpiphp_func *func;
+       struct pci_bus *bus = slot->bus;
+       struct pci_dev *dev;
 
-       list_for_each_entry(func, &slot->funcs, sibling)
-               if (acpiphp_no_hotplug(func_to_acpi_device(func)))
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               if (PCI_SLOT(dev->devfn) == slot->device && dev->ignore_hotplug)
                        return true;
-
+       }
        return false;
 }
 
@@ -645,7 +641,7 @@ static void trim_stale_devices(struct pci_dev *dev)
 
                status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
                alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
-                       || acpiphp_no_hotplug(adev);
+                       || dev->ignore_hotplug;
        }
        if (!alive)
                alive = pci_device_is_present(dev);
index 9da84b8b27d8a4cdac4305cde5ac50d203b89583..2a412fa3b338f4b8399685c5b46e43a4e925871a 100644 (file)
@@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl)
            ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
                rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
        else
-               rc = pcie_poll_cmd(ctrl, timeout);
+               rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 
        /*
         * Controllers with errata like Intel CF118 don't generate
@@ -506,6 +506,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 {
        struct controller *ctrl = (struct controller *)dev_id;
        struct pci_dev *pdev = ctrl_dev(ctrl);
+       struct pci_bus *subordinate = pdev->subordinate;
+       struct pci_dev *dev;
        struct slot *slot = ctrl->slot;
        u16 detected, intr_loc;
 
@@ -539,6 +541,16 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
                wake_up(&ctrl->queue);
        }
 
+       if (subordinate) {
+               list_for_each_entry(dev, &subordinate->devices, bus_list) {
+                       if (dev->ignore_hotplug) {
+                               ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
+                                        intr_loc, pci_name(dev));
+                               return IRQ_HANDLED;
+                       }
+               }
+       }
+
        if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
                return IRQ_HANDLED;
 
index e246a10a0d2c5e4eb729e3d4c822f6dda19f176a..3e36ec8d708afe7afa52c733436786a7f1efe3dd 100644 (file)
@@ -46,7 +46,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
                 */
                if (pci_is_pcie(dev))
                        return;
-               dev_info(&dev->dev, "using default PCI settings\n");
                hpp = &pci_default_type0;
        }
 
@@ -153,7 +152,6 @@ void pci_configure_slot(struct pci_dev *dev)
 {
        struct pci_dev *cdev;
        struct hotplug_params hpp;
-       int ret;
 
        if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
                        (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
@@ -163,9 +161,7 @@ void pci_configure_slot(struct pci_dev *dev)
        pcie_bus_configure_settings(dev->bus);
 
        memset(&hpp, 0, sizeof(hpp));
-       ret = pci_get_hp_params(dev, &hpp);
-       if (ret)
-               dev_warn(&dev->dev, "no hotplug settings from platform\n");
+       pci_get_hp_params(dev, &hpp);
 
        program_hpp_type2(dev, hpp.t2);
        program_hpp_type1(dev, hpp.t1);
index e3cf8a2e629216208750bc96ed84285e7a2c0118..4170113cde6141a9962b23a9d6342647a0465210 100644 (file)
@@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
        /* Check if setup is sensible at all */
        if (!pass &&
            (primary != bus->number || secondary <= bus->number ||
-            secondary > subordinate || subordinate > bus->busn_res.end)) {
+            secondary > subordinate)) {
                dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
                         secondary, subordinate);
                broken = 1;
@@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
                        goto out;
                }
 
-               if (max >= bus->busn_res.end) {
-                       dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
-                                max, &bus->busn_res);
-                       goto out;
-               }
-
                /* Clear errors */
                pci_write_config_word(dev, PCI_STATUS, 0xffff);
 
-               /* The bus will already exist if we are rescanning */
+               /* Prevent assigning a bus number that already exists.
+                * This can happen when a bridge is hot-plugged, so in
+                * this case we only re-scan this bus. */
                child = pci_find_bus(pci_domain_nr(bus), max+1);
                if (!child) {
                        child = pci_add_new_bus(bus, dev, max+1);
                        if (!child)
                                goto out;
-                       pci_bus_insert_busn_res(child, max+1,
-                                               bus->busn_res.end);
+                       pci_bus_insert_busn_res(child, max+1, 0xff);
                }
                max++;
                buses = (buses & 0xff000000)
@@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
                /*
                 * Set the subordinate bus number to its real value.
                 */
-               if (max > bus->busn_res.end) {
-                       dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
-                                max, &bus->busn_res);
-                       max = bus->busn_res.end;
-               }
                pci_bus_update_busn_res_end(child, max);
                pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
        }
index 0dd742719154a9d7172dcd9c4f9f9dc3e26e6030..f833aa271a2e2306877cf3624157eaa189615702 100644 (file)
@@ -41,9 +41,9 @@ config PHY_MVEBU_SATA
 config PHY_MIPHY365X
        tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
        depends on ARCH_STI
-       depends on GENERIC_PHY
        depends on HAS_IOMEM
        depends on OF
+       select GENERIC_PHY
        help
          Enable this to support the miphy transceiver (for SATA/PCIE)
          that is part of STMicroelectronics STiH41x SoC series.
@@ -214,12 +214,14 @@ config PHY_QCOM_IPQ806X_SATA
 config PHY_ST_SPEAR1310_MIPHY
        tristate "ST SPEAR1310-MIPHY driver"
        select GENERIC_PHY
+       depends on MACH_SPEAR1310 || COMPILE_TEST
        help
          Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
 
 config PHY_ST_SPEAR1340_MIPHY
        tristate "ST SPEAR1340-MIPHY driver"
        select GENERIC_PHY
+       depends on MACH_SPEAR1340 || COMPILE_TEST
        help
          Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
 
index b05302b09c9fd0f0bdca202f1b52f1e51b94e0f1..392101c8d6b04aad1f9cc25119c65cac0ff73471 100644 (file)
@@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
        },
        { },
 };
+MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match);
 
 static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
 {
index e111baf187ce2d4a37484601e41fa29ca7e757bb..e0fb7a1e5a5a9c9cf0be5fc109ba09556cb35593 100644 (file)
@@ -163,6 +163,7 @@ enum miphy_sata_gen {
 };
 
 static u8 rx_tx_spd[] = {
+       0, /* GEN0 doesn't exist. */
        TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL,
        TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL,
        TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL
index e1a6623d4696f1c24267292764b58f65b6137f47..9cd33a4bcfb193c99bd03d981f9d75d0e54f7bac 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
 #include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
 #include <linux/usb/musb-omap.h>
 #include <linux/usb/ulpi.h>
 #include <linux/i2c/twl.h>
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
        }
 }
 
-static int twl4030_phy_power_off(struct phy *phy)
+static int twl4030_usb_runtime_suspend(struct device *dev)
 {
-       struct twl4030_usb *twl = phy_get_drvdata(phy);
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
 
+       dev_dbg(twl->dev, "%s\n", __func__);
        if (twl->asleep)
                return 0;
 
        twl4030_phy_power(twl, 0);
        twl->asleep = 1;
-       dev_dbg(twl->dev, "%s\n", __func__);
+
        return 0;
 }
 
-static void __twl4030_phy_power_on(struct twl4030_usb *twl)
+static int twl4030_usb_runtime_resume(struct device *dev)
 {
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       if (!twl->asleep)
+               return 0;
+
        twl4030_phy_power(twl, 1);
-       twl4030_i2c_access(twl, 1);
-       twl4030_usb_set_mode(twl, twl->usb_mode);
-       if (twl->usb_mode == T2_USB_MODE_ULPI)
-               twl4030_i2c_access(twl, 0);
+       twl->asleep = 0;
+
+       return 0;
+}
+
+static int twl4030_phy_power_off(struct phy *phy)
+{
+       struct twl4030_usb *twl = phy_get_drvdata(phy);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
+       return 0;
 }
 
 static int twl4030_phy_power_on(struct phy *phy)
 {
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
-       if (!twl->asleep)
-               return 0;
-       __twl4030_phy_power_on(twl);
-       twl->asleep = 0;
        dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_get_sync(twl->dev);
+       twl4030_i2c_access(twl, 1);
+       twl4030_usb_set_mode(twl, twl->usb_mode);
+       if (twl->usb_mode == T2_USB_MODE_ULPI)
+               twl4030_i2c_access(twl, 0);
 
        /*
         * XXX When VBUS gets driven after musb goes to A mode,
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
+               if ((status == OMAP_MUSB_VBUS_VALID) ||
+                   (status == OMAP_MUSB_ID_GROUND)) {
+                       if (twl->asleep)
+                               pm_runtime_get_sync(twl->dev);
+               } else {
+                       if (!twl->asleep) {
+                               pm_runtime_mark_last_busy(twl->dev);
+                               pm_runtime_put_autosuspend(twl->dev);
+                       }
+               }
                omap_musb_mailbox(status);
        }
-       sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+       /* don't schedule during sleep - irq works right then */
+       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
+               cancel_delayed_work(&twl->id_workaround_work);
+               schedule_delayed_work(&twl->id_workaround_work, HZ);
+       }
+
+       if (irq)
+               sysfs_notify(&twl->dev->kobj, NULL, "vbus");
 
        return IRQ_HANDLED;
 }
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
 {
        struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
                id_workaround_work.work);
-       enum omap_musb_vbus_id_status status;
-       bool status_changed = false;
-
-       status = twl4030_usb_linkstat(twl);
-
-       spin_lock_irq(&twl->lock);
-       if (status >= 0 && status != twl->linkstat) {
-               twl->linkstat = status;
-               status_changed = true;
-       }
-       spin_unlock_irq(&twl->lock);
-
-       if (status_changed) {
-               dev_dbg(twl->dev, "handle missing status change to %d\n",
-                               status);
-               omap_musb_mailbox(status);
-       }
 
-       /* don't schedule during sleep - irq works right then */
-       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
-               cancel_delayed_work(&twl->id_workaround_work);
-               schedule_delayed_work(&twl->id_workaround_work, HZ);
-       }
+       twl4030_usb_irq(0, twl);
 }
 
 static int twl4030_phy_init(struct phy *phy)
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
        enum omap_musb_vbus_id_status status;
 
-       /*
-        * Start in sleep state, we'll get called through set_suspend()
-        * callback when musb is runtime resumed and it's time to start.
-        */
-       __twl4030_phy_power(twl, 0);
-       twl->asleep = 1;
-
+       pm_runtime_get_sync(twl->dev);
        status = twl4030_usb_linkstat(twl);
        twl->linkstat = status;
 
-       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
+       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
                omap_musb_mailbox(twl->linkstat);
-               twl4030_phy_power_on(phy);
-       }
 
        sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        return 0;
 }
 
@@ -650,6 +661,11 @@ static const struct phy_ops ops = {
        .owner          = THIS_MODULE,
 };
 
+static const struct dev_pm_ops twl4030_usb_pm_ops = {
+       SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
+                          twl4030_usb_runtime_resume, NULL)
+};
+
 static int twl4030_usb_probe(struct platform_device *pdev)
 {
        struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
 
        ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
 
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
        /* Our job is to use irqs and status from the power module
         * to keep the transceiver disabled when nothing's connected.
         *
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
        return 0;
 }
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        struct twl4030_usb *twl = platform_get_drvdata(pdev);
        int val;
 
+       pm_runtime_get_sync(twl->dev);
        cancel_delayed_work(&twl->id_workaround_work);
        device_remove_file(twl->dev, &dev_attr_vbus);
 
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        /* disable complete OTG block */
        twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
-
-       if (!twl->asleep)
-               twl4030_phy_power(twl, 0);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put(twl->dev);
 
        return 0;
 }
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
        .remove         = twl4030_usb_remove,
        .driver         = {
                .name   = "twl4030_usb",
+               .pm     = &twl4030_usb_pm_ops,
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(twl4030_usb_id_table),
        },
index 9ca59a01874316585a190093b0265401e17975a3..e12e5b07f6d751aba9cce63cf49d2acabe50b7ab 100644 (file)
@@ -461,6 +461,7 @@ static struct irq_chip byt_irqchip = {
        .irq_mask = byt_irq_mask,
        .irq_unmask = byt_irq_unmask,
        .irq_set_type = byt_irq_type,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
index 337634ad0562449495c12fb4e39efd989f85edef..6d77dcd7dcf6cfb3a8f76745c779d09eae2f7d97 100644 (file)
@@ -319,7 +319,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
                                    struct regulator_config *config)
 {
        struct device_node *nproot, *np;
-       nproot = of_node_get(pdev->dev.parent->of_node);
+       nproot = pdev->dev.parent->of_node;
        if (!nproot)
                return -ENODEV;
        nproot = of_get_child_by_name(nproot, "regulators");
index fdb6ea8ae7e64dc73a7cb2cc4f16a8b43c533b72..00033625a09ce3c601833ec6f7f6b8a0e723956a 100644 (file)
@@ -422,9 +422,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                config.init_data = pdata->regulators[pdev->id];
        } else {
 #ifdef CONFIG_OF
-               struct device_node *nproot, *np;
+               struct device_node *nproot = da9052->dev->of_node;
+               struct device_node *np;
 
-               nproot = of_node_get(da9052->dev->of_node);
                if (!nproot)
                        return -ENODEV;
 
index 9623e9e290bf91bb1b7a330946b6783e5bdc7707..3426be89c9f6e3753b36f90fe87cc72324d4a2e4 100644 (file)
@@ -226,7 +226,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
        struct device_node *np, *regulators;
        int ret;
 
-       np = of_node_get(pdev->dev.parent->of_node);
+       np = pdev->dev.parent->of_node;
        if (!np)
                return 0;
 
index dad2bcd14e962759d9f5e7b9ae579f6c1369b325..7770777befc448a848c0071faddd1b0818cb6137 100644 (file)
@@ -250,7 +250,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
        struct device_node *nproot, *np;
        int rcount;
 
-       nproot = of_node_get(pdev->dev.parent->of_node);
+       nproot = pdev->dev.parent->of_node;
        if (!nproot)
                return -ENODEV;
        np = of_get_child_by_name(nproot, "regulators");
index 90b4c530dee530b7255b21d238929d744de5abcd..9c31e215a521e71c7cf7148f8e69a09ce5f58acc 100644 (file)
@@ -917,7 +917,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
        struct max8997_regulator_data *rdata;
        unsigned int i, dvs_voltage_nr = 1, ret;
 
-       pmic_np = of_node_get(iodev->dev->of_node);
+       pmic_np = iodev->dev->of_node;
        if (!pmic_np) {
                dev_err(&pdev->dev, "could not find pmic sub-node\n");
                return -ENODEV;
index a7ce34d1b5f2e8514ad33fcb2170c35ab7e5ecca..1878e5b567efc6b98662670ab183e99d9c5c1e38 100644 (file)
@@ -1427,7 +1427,6 @@ static void palmas_dt_to_pdata(struct device *dev,
        u32 prop;
        int idx, ret;
 
-       node = of_node_get(node);
        regulators = of_get_child_by_name(node, "regulators");
        if (!regulators) {
                dev_info(dev, "regulator node not found\n");
index fa7db8847578abd61787d0820661e675c3cd80b0..e584c998b55f203eb601559c1b4ea0113cd29c34 100644 (file)
@@ -1014,7 +1014,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
        if (!pmic_plat_data)
                return NULL;
 
-       np = of_node_get(pdev->dev.parent->of_node);
+       np = pdev->dev.parent->of_node;
        regulators = of_get_child_by_name(np, "regulators");
        if (!regulators) {
                dev_err(&pdev->dev, "regulator node not found\n");
index 8225b89de810c88c794c4251d56f1ce613c906b2..c384fec6d173b2f051c2871e78fb0128ac65782c 100644 (file)
@@ -232,6 +232,7 @@ static struct platform_driver efi_rtc_driver = {
 
 module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
 
+MODULE_ALIAS("platform:rtc-efi");
 MODULE_AUTHOR("dann frazier <dannf@hp.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("EFI RTC driver");
index 2ead7e78c4568ec7fe50ece74c3109d424efa852..14ba80bfa571bc4b34eb795c85f6725b96f6ad7b 100644 (file)
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
  * strings when running as a module.
  */
 static char *dasd[256];
-module_param_array(dasd, charp, NULL, 0);
+module_param_array(dasd, charp, NULL, S_IRUGO);
 
 /*
  * Single spinlock to protect devmap and servermap structures and lists.
index 97ef37b51068369b44d41376e630534d014241a5..e7646ce3d659218ca7fd9762e05cf0d7c4c95a5c 100644 (file)
@@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[];
 extern const struct attribute_group *qeth_osn_attr_groups[];
 extern struct workqueue_struct *qeth_wq;
 
+int qeth_card_hw_is_reachable(struct qeth_card *);
 const char *qeth_get_cardname_short(struct qeth_card *);
 int qeth_realloc_buffer_pool(struct qeth_card *, int);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
index c0d6ba8655c742991bc13fe1b2fe2a7a740ea4ac..fd22c811cbe195d2056f6bb5b6339fa33113b8cc 100644 (file)
@@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 struct workqueue_struct *qeth_wq;
 EXPORT_SYMBOL_GPL(qeth_wq);
 
+int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+       return (card->state == CARD_STATE_SOFTSETUP) ||
+               (card->state == CARD_STATE_UP);
+}
+EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
+
 static void qeth_close_dev_handler(struct work_struct *work)
 {
        struct qeth_card *card;
@@ -5790,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
        struct qeth_card *card = netdev->ml_priv;
        enum qeth_link_types link_type;
        struct carrier_info carrier_info;
+       int rc;
        u32 speed;
 
        if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
@@ -5832,8 +5840,14 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
        /* Check if we can obtain more accurate information.     */
        /* If QUERY_CARD_INFO command is not supported or fails, */
        /* just return the heuristics that was filled above.     */
-       if (qeth_query_card_info(card, &carrier_info) != 0)
+       if (!qeth_card_hw_is_reachable(card))
+               return -ENODEV;
+       rc = qeth_query_card_info(card, &carrier_info);
+       if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
                return 0;
+       if (rc) /* report error from the hardware operation */
+               return rc;
+       /* on success, fill in the information got from the hardware */
 
        netdev_dbg(netdev,
        "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
index ae1bc04b8653eed571dbbc9a271ec9cdc93205f3..59e3aa538b4da4594456b0965bc0fe42a49379f3 100644 (file)
@@ -5,17 +5,12 @@
 
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
+#include "qeth_core.h"
 #include "qeth_l2.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
-static int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
-       return (card->state == CARD_STATE_SOFTSETUP) ||
-               (card->state == CARD_STATE_UP);
-}
-
 static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
                                struct device_attribute *attr, char *buf,
                                int show_state)
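Taken together, the three qeth hunks above make the ethtool path distinguish an unreachable card (-ENODEV), old firmware that lacks QUERY_CARD_INFO (-EOPNOTSUPP, in which case the earlier heuristic values are kept), and any other failure, which is now propagated instead of silently swallowed. A standalone sketch of that error-handling shape with stubbed hardware calls; all names and values here are invented.

#include <errno.h>
#include <stdio.h>

struct card_info { int speed; };

/* Stand-ins for the real hardware queries. */
static int hw_reachable(void) { return 1; }
static int query_card_info(struct card_info *info)
{
    (void)info;
    return -EOPNOTSUPP; /* old firmware: command not known */
}

static int get_settings(int *speed)
{
    struct card_info info;
    int rc;

    *speed = 1000;              /* heuristic value filled in earlier */

    if (!hw_reachable())
        return -ENODEV;         /* card is down, nothing to report */

    rc = query_card_info(&info);
    if (rc == -EOPNOTSUPP)
        return 0;               /* keep the heuristic */
    if (rc)
        return rc;              /* genuine failure: propagate it */

    *speed = info.speed;        /* success: trust the hardware */
    return 0;
}

int main(void)
{
    int speed;
    int rc = get_settings(&speed);

    printf("rc=%d speed=%d\n", rc, speed);
    return 0;
}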
index 18a3358eb1d47c9f8cd6acd9e62816691afbff66..bd85fb4978e0a8fab00de545c14519c0e78f77d5 100644 (file)
@@ -43,7 +43,7 @@ config SCSI_DMA
 config SCSI_NETLINK
        bool
        default n
-       select NET
+       depends on NET
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
@@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS
 
 config SCSI_FC_ATTRS
        tristate "FiberChannel Transport Attributes"
-       depends on SCSI
+       depends on SCSI && NET
        select SCSI_NETLINK
        help
          If you wish to export transport-specific information about
@@ -585,28 +585,28 @@ config HYPERV_STORAGE
 
 config LIBFC
        tristate "LibFC module"
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC32
        ---help---
          Fibre Channel library module
 
 config LIBFCOE
        tristate "LibFCoE module"
-       select LIBFC
+       depends on LIBFC
        ---help---
          Library for Fibre Channel over Ethernet module
 
 config FCOE
        tristate "FCoE module"
        depends on PCI
-       select LIBFCOE
+       depends on LIBFCOE
        ---help---
          Fibre Channel over Ethernet module
 
 config FCOE_FNIC
        tristate "Cisco FNIC Driver"
        depends on PCI && X86
-       select LIBFCOE
+       depends on LIBFCOE
        help
          This is support for the Cisco PCI-Express FCoE HBA.
 
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI
 config SCSI_IBMVFC
        tristate "IBM Virtual FC support"
        depends on PPC_PSERIES && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This is the IBM POWER Virtual FC Client
 
@@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig"
 config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
@@ -1676,7 +1676,7 @@ config SCSI_SUNESP
 config ZFCP
        tristate "FCP host bus adapter driver for IBM eServer zSeries"
        depends on S390 && QDIO && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
           If you want to access SCSI devices attached to your IBM eServer
           zSeries by means of Fibre Channel interfaces say Y.
@@ -1704,7 +1704,7 @@ config SCSI_PM8001
 config SCSI_BFA_FC
        tristate "Brocade BFA Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
 
index f245d543d7b1405f060fcd90491382e37739d24b..0978828826498693f794f661f0a6dc0f9cde6170 100644 (file)
@@ -1,11 +1,12 @@
 config SCSI_BNX2X_FCOE
        tristate "QLogic NetXtreme II FCoE support"
        depends on PCI
+       depends on (IPV6 || IPV6=n)
+       depends on LIBFC
+       depends on LIBFCOE
        select NETDEVICES
        select ETHERNET
        select NET_VENDOR_BROADCOM
-       select LIBFC
-       select LIBFCOE
        select CNIC
        ---help---
        This driver supports FCoE offload for the QLogic NetXtreme II
index 44ce54e536e54c4e3c2ef02a2bdd471d6b9c5e32..ba30ff86d5818579ac7662c73ffb6b03e0c6caf6 100644 (file)
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
        tristate "QLogic NetXtreme II iSCSI support"
        depends on NET
        depends on PCI
+       depends on (IPV6 || IPV6=n)
        select SCSI_ISCSI_ATTRS
        select NETDEVICES
        select ETHERNET
index 4d03b032aa10b3ec9acf6452f2e172c35ffbdf4e..7c7e5085968bdd7ea7ef37c1a1bb13e434bebf07 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_CHELSIO_FCOE
        tristate "Chelsio Communications FCoE support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        help
          This driver supports FCoE Offload functionality over
index 79788a12712d1ccc9274347f861cfb6bd9156d03..02e69e7ee4a36cc653baf90409834e7bf9175c94 100644 (file)
@@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this,
        if (event_dev->priv_flags & IFF_802_1Q_VLAN)
                event_dev = vlan_dev_real_dev(event_dev);
 
-       cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+       cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
 
        if (!cdev)
                return ret;
index d65df6dc106f18a4b8d41f478f98854eeb45a946..addd1dddce14b28f3900d054c5c5ec4e62be18f6 100644 (file)
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
 static LIST_HEAD(cdev_list);
 static DEFINE_MUTEX(cdev_mutex);
 
+static LIST_HEAD(cdev_rcu_list);
+static DEFINE_SPINLOCK(cdev_rcu_lock);
+
 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
                                unsigned int max_conn)
 {
@@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra,
        list_add_tail(&cdev->list_head, &cdev_list);
        mutex_unlock(&cdev_mutex);
 
+       spin_lock(&cdev_rcu_lock);
+       list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
+       spin_unlock(&cdev_rcu_lock);
+
        log_debug(1 << CXGBI_DBG_DEV,
                "cdev 0x%p, p# %u.\n", cdev, nports);
        return cdev;
@@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev)
        log_debug(1 << CXGBI_DBG_DEV,
                "cdev 0x%p, p# %u,%s.\n",
                cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
+
        mutex_lock(&cdev_mutex);
        list_del(&cdev->list_head);
        mutex_unlock(&cdev_mutex);
+
+       spin_lock(&cdev_rcu_lock);
+       list_del_rcu(&cdev->rcu_node);
+       spin_unlock(&cdev_rcu_lock);
+       synchronize_rcu();
+
        cxgbi_device_destroy(cdev);
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
@@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag)
        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
                if ((cdev->flags & flag) == flag) {
-                       log_debug(1 << CXGBI_DBG_DEV,
-                               "cdev 0x%p, p# %u,%s.\n",
-                               cdev, cdev->nports, cdev->nports ?
-                                cdev->ports[0]->name : "");
-                       list_del(&cdev->list_head);
-                       cxgbi_device_destroy(cdev);
+                       mutex_unlock(&cdev_mutex);
+                       cxgbi_device_unregister(cdev);
+                       mutex_lock(&cdev_mutex);
                }
        }
        mutex_unlock(&cdev_mutex);
@@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
                }
        }
        mutex_unlock(&cdev_mutex);
+
        log_debug(1 << CXGBI_DBG_DEV,
                "lldev 0x%p, NO match found.\n", lldev);
        return NULL;
@@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
 
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
+                                                    int *port)
+{
+       struct net_device *vdev = NULL;
+       struct cxgbi_device *cdev;
+       int i;
+
+       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+               vdev = ndev;
+               ndev = vlan_dev_real_dev(ndev);
+               pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+       }
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
+               for (i = 0; i < cdev->nports; i++) {
+                       if (ndev == cdev->ports[i]) {
+                               cdev->hbas[i]->vdev = vdev;
+                               rcu_read_unlock();
+                               if (port)
+                                       *port = i;
+                               return cdev;
+                       }
+               }
+       }
+       rcu_read_unlock();
+
+       log_debug(1 << CXGBI_DBG_DEV,
+                 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
+
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
                                                     int *port)
 {
index b3e6e7541cc50521dac10e33f47aae9c1d42df8a..1d98fad6a0ab7198ae81ef0e8f5765fabc72f0b9 100644 (file)
@@ -527,6 +527,7 @@ struct cxgbi_ports_map {
 #define CXGBI_FLAG_IPV4_SET            0x10
 struct cxgbi_device {
        struct list_head list_head;
+       struct list_head rcu_node;
        unsigned int flags;
        struct net_device **ports;
        void *lldev;
@@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *);
 void cxgbi_device_unregister_all(unsigned int flag);
 struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
+                                                    int *);
 int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
                        struct scsi_host_template *,
                        struct scsi_transport_template *);
index ea025e4806b61bb53471e79765bb3c165e31eb8f..191b5979351935aac252fbeaadf5a8dee66d6293 100644 (file)
@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        return NULL;
                }
 
+               if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
+                       return NULL;
+               }
+
                task = conn->login_task;
        } else {
                if (session->state != ISCSI_STATE_LOGGED_IN)
                        return NULL;
 
+               if (data_size != 0) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
+                       return NULL;
+               }
+
                BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
                BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
index 23d607218ae8b8a27e07b51d3752bfae17bbba26..113e6c9826a1a96e17a6a36f0f7e8a0b6438ca7e 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_QLA_FC
        tristate "QLogic QLA2XXX Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        ---help---
        This qla2xxx driver supports all QLogic Fibre Channel
@@ -31,7 +31,7 @@ config SCSI_QLA_FC
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
        depends on SCSI_QLA_FC && TARGET_CORE
-       select LIBFC
+       depends on LIBFC
        select BTREE
        default n
        ---help---
index d837dc180522142fa1dcaf5b4e09c141effbfc4a..aaea4b98af1628c8ca8d0217ebca9a9da1708b8a 100644 (file)
@@ -733,12 +733,13 @@ static bool scsi_end_request(struct request *req, int error,
        } else {
                unsigned long flags;
 
+               if (bidi_bytes)
+                       scsi_release_bidi_buffers(cmd);
+
                spin_lock_irqsave(q->queue_lock, flags);
                blk_finish_request(req, error);
                spin_unlock_irqrestore(q->queue_lock, flags);
 
-               if (bidi_bytes)
-                       scsi_release_bidi_buffers(cmd);
                scsi_release_buffers(cmd);
                scsi_next_command(cmd);
        }
index 447458e696a980615a961576a8e648a3c8445911..7e1f120f2b32348c0043d68244602011e7b1cab7 100644 (file)
 #define GSBI_CTRL_REG          0x0000
 #define GSBI_PROTOCOL_SHIFT    4
 
+struct gsbi_info {
+       struct clk *hclk;
+       u32 mode;
+       u32 crci;
+};
+
 static int gsbi_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct resource *res;
        void __iomem *base;
-       struct clk *hclk;
-       u32 mode, crci = 0;
+       struct gsbi_info *gsbi;
+
+       gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+       if (!gsbi)
+               return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       if (of_property_read_u32(node, "qcom,mode", &mode)) {
+       if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
                dev_err(&pdev->dev, "missing mode configuration\n");
                return -EINVAL;
        }
 
        /* not required, so default to 0 if not present */
-       of_property_read_u32(node, "qcom,crci", &crci);
+       of_property_read_u32(node, "qcom,crci", &gsbi->crci);
 
-       dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
+       dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+                gsbi->mode, gsbi->crci);
+       gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+       if (IS_ERR(gsbi->hclk))
+               return PTR_ERR(gsbi->hclk);
 
-       hclk = devm_clk_get(&pdev->dev, "iface");
-       if (IS_ERR(hclk))
-               return PTR_ERR(hclk);
+       clk_prepare_enable(gsbi->hclk);
 
-       clk_prepare_enable(hclk);
-
-       writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
+       writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
                                base + GSBI_CTRL_REG);
 
        /* make sure the gsbi control write is not reordered */
        wmb();
 
-       clk_disable_unprepare(hclk);
+       platform_set_drvdata(pdev, gsbi);
+
+       return of_platform_populate(node, NULL, NULL, &pdev->dev);
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+       struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(gsbi->hclk);
 
-       return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+       return 0;
 }
 
 static const struct of_device_id gsbi_dt_match[] = {
@@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = {
                .of_match_table = gsbi_dt_match,
        },
        .probe = gsbi_probe,
+       .remove = gsbi_remove,
 };
 
 module_platform_driver(gsbi_driver);
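
The gsbi rework above keeps the interface clock running after probe and therefore needs per-device state that survives until remove(). A minimal sketch of that probe/remove drvdata pattern; the demo_* names and the "demo-gsbi" driver name are placeholders, not the real driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_info {
        struct clk *hclk;
};

static int demo_probe(struct platform_device *pdev)
{
        struct demo_info *info;

        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->hclk = devm_clk_get(&pdev->dev, "iface");
        if (IS_ERR(info->hclk))
                return PTR_ERR(info->hclk);

        clk_prepare_enable(info->hclk);         /* stays enabled until remove() */
        platform_set_drvdata(pdev, info);       /* retrievable from remove() */

        return 0;
}

static int demo_remove(struct platform_device *pdev)
{
        struct demo_info *info = platform_get_drvdata(pdev);

        clk_disable_unprepare(info->hclk);
        return 0;
}

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name = "demo-gsbi",
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");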
index 48f1d26e6ad9868f9ac8c8e11fb0953e90f707da..134fb6eb7b19247dd2bf729d102fcf5e9263bf7c 100644 (file)
@@ -397,24 +397,21 @@ static int davinci_spi_setup(struct spi_device *spi)
        struct spi_master *master = spi->master;
        struct device_node *np = spi->dev.of_node;
        bool internal_cs = true;
-       unsigned long flags = GPIOF_DIR_OUT;
 
        dspi = spi_master_get_devdata(spi->master);
        pdata = &dspi->pdata;
 
-       flags |= (spi->mode & SPI_CS_HIGH) ? GPIOF_INIT_LOW : GPIOF_INIT_HIGH;
-
        if (!(spi->mode & SPI_NO_CS)) {
                if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
-                       retval = gpio_request_one(spi->cs_gpio,
-                                                 flags, dev_name(&spi->dev));
+                       retval = gpio_direction_output(
+                                     spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
                        internal_cs = false;
                } else if (pdata->chip_sel &&
                           spi->chip_select < pdata->num_chipselect &&
                           pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
                        spi->cs_gpio = pdata->chip_sel[spi->chip_select];
-                       retval = gpio_request_one(spi->cs_gpio,
-                                                 flags, dev_name(&spi->dev));
+                       retval = gpio_direction_output(
+                                     spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
                        internal_cs = false;
                }
 
@@ -439,12 +436,6 @@ static int davinci_spi_setup(struct spi_device *spi)
        return retval;
 }
 
-static void davinci_spi_cleanup(struct spi_device *spi)
-{
-       if (spi->cs_gpio >= 0)
-               gpio_free(spi->cs_gpio);
-}
-
 static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
 {
        struct device *sdev = dspi->bitbang.master->dev.parent;
@@ -956,7 +947,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
        master->num_chipselect = pdata->num_chipselect;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
        master->setup = davinci_spi_setup;
-       master->cleanup = davinci_spi_cleanup;
 
        dspi->bitbang.chipselect = davinci_spi_chipselect;
        dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
@@ -967,6 +957,27 @@ static int davinci_spi_probe(struct platform_device *pdev)
        if (dspi->version == SPI_VERSION_2)
                dspi->bitbang.flags |= SPI_READY;
 
+       if (pdev->dev.of_node) {
+               int i;
+
+               for (i = 0; i < pdata->num_chipselect; i++) {
+                       int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+                                                       "cs-gpios", i);
+
+                       if (cs_gpio == -EPROBE_DEFER) {
+                               ret = cs_gpio;
+                               goto free_clk;
+                       }
+
+                       if (gpio_is_valid(cs_gpio)) {
+                               ret = devm_gpio_request(&pdev->dev, cs_gpio,
+                                                       dev_name(&pdev->dev));
+                               if (ret)
+                                       goto free_clk;
+                       }
+               }
+       }
+
        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (r)
                dma_rx_chan = r->start;
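
The davinci change claims every chip-select GPIO once at probe time with devm_gpio_request() (deferring if the GPIO controller is not ready yet), so setup() only has to flip the line direction and the old per-device cleanup() that freed GPIOs is no longer needed. A sketch of that claim-once loop with placeholder names:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>

static int demo_claim_cs_gpios(struct platform_device *pdev, int num_cs)
{
        int i, ret;

        for (i = 0; i < num_cs; i++) {
                int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
                                                "cs-gpios", i);

                if (cs_gpio == -EPROBE_DEFER)
                        return cs_gpio;         /* GPIO provider not ready yet */

                if (!gpio_is_valid(cs_gpio))
                        continue;               /* hole in cs-gpios: internal CS */

                ret = devm_gpio_request(&pdev->dev, cs_gpio,
                                        dev_name(&pdev->dev));
                if (ret)                        /* freed automatically on unbind */
                        return ret;
        }

        return 0;
}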
index 670f0627f3bfc793092495be55f3d6fd9854dd34..0dd0623319b08d708a2e869443618f43436e81aa 100644 (file)
@@ -547,8 +547,7 @@ static int dw_spi_setup(struct spi_device *spi)
        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (!chip) {
-               chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-                               GFP_KERNEL);
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
                spi_set_ctldata(spi, chip);
@@ -606,6 +605,14 @@ static int dw_spi_setup(struct spi_device *spi)
        return 0;
 }
 
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+       struct chip_data *chip = spi_get_ctldata(spi);
+
+       kfree(chip);
+       spi_set_ctldata(spi, NULL);
+}
+
 /* Restart the controller, disable all interrupts, clean rx fifo */
 static void spi_hw_init(struct dw_spi *dws)
 {
@@ -661,6 +668,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        master->bus_num = dws->bus_num;
        master->num_chipselect = dws->num_cs;
        master->setup = dw_spi_setup;
+       master->cleanup = dw_spi_cleanup;
        master->transfer_one_message = dw_spi_transfer_one_message;
        master->max_speed_hz = dws->max_freq;
 
index 8ebd724e4c59761511d55de13e55ac277a75e71a..429e111902651ba12ea41ec9c272c5ab2b3cb4ef 100644 (file)
@@ -452,16 +452,16 @@ static int fsl_espi_setup(struct spi_device *spi)
        int retval;
        u32 hw_mode;
        u32 loop_mode;
-       struct spi_mpc8xxx_cs *cs = spi->controller_state;
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (!spi->max_speed_hz)
                return -EINVAL;
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+               cs = kzalloc(sizeof(*cs), GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
-               spi->controller_state = cs;
+               spi_set_ctldata(spi, cs);
        }
 
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -496,6 +496,14 @@ static int fsl_espi_setup(struct spi_device *spi)
        return 0;
 }
 
+static void fsl_espi_cleanup(struct spi_device *spi)
+{
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+       kfree(cs);
+       spi_set_ctldata(spi, NULL);
+}
+
 void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
 {
        struct fsl_espi_reg *reg_base = mspi->reg_base;
@@ -605,6 +613,7 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
 
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
        master->setup = fsl_espi_setup;
+       master->cleanup = fsl_espi_cleanup;
 
        mpc8xxx_spi = spi_master_get_devdata(master);
        mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
index 9452f6740997b1970e8645e4cd141010b5389b38..590f31bc0aba17d6640357aca6c971cb9297c9e1 100644 (file)
@@ -425,16 +425,16 @@ static int fsl_spi_setup(struct spi_device *spi)
        struct fsl_spi_reg *reg_base;
        int retval;
        u32 hw_mode;
-       struct spi_mpc8xxx_cs   *cs = spi->controller_state;
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (!spi->max_speed_hz)
                return -EINVAL;
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+               cs = kzalloc(sizeof(*cs), GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
-               spi->controller_state = cs;
+               spi_set_ctldata(spi, cs);
        }
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
 
@@ -496,9 +496,13 @@ static int fsl_spi_setup(struct spi_device *spi)
 static void fsl_spi_cleanup(struct spi_device *spi)
 {
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+       struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
        if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
                gpio_free(spi->cs_gpio);
+
+       kfree(cs);
+       spi_set_ctldata(spi, NULL);
 }
 
 static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
index 1189cfd96477c7cb53236d110b4a077b3aeb270a..f1f0a587e4fcd596d0df0f6de579ed954c17ebbc 100644 (file)
@@ -2136,7 +2136,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
                                                cs_gpio);
                                else if (gpio_direction_output(cs_gpio, 1))
                                        dev_err(&adev->dev,
-                                               "could set gpio %d as output\n",
+                                               "could not set gpio %d as output\n",
                                                cs_gpio);
                        }
                }
index cd0e08b0c9f66c7d99d1421e8fdbf882f62f7e77..3afc266b666d38f874c1f8f4d1f8f8f4dd9b2747 100644 (file)
@@ -220,7 +220,7 @@ static inline void wait_for_idle(struct rockchip_spi *rs)
        do {
                if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
                        return;
-       } while (time_before(jiffies, timeout));
+       } while (!time_after(jiffies, timeout));
 
        dev_warn(rs->dev, "spi controller is in busy state!\n");
 }
@@ -529,7 +529,8 @@ static int rockchip_spi_transfer_one(
        int ret = 0;
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
-       WARN_ON((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
+       WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+               (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
 
        if (!xfer->tx_buf && !xfer->rx_buf) {
                dev_err(rs->dev, "No buffer for transfer\n");
index 95ac276eaafe6ab7a6c1f6b82aede8bad00bdac0..6f0602fd74012f298467397f0aca2d15709a5d15 100644 (file)
@@ -312,6 +312,8 @@ static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
        u32 cmd;
 
        sspi = spi_master_get_devdata(spi->master);
+       writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+       writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        memcpy(&cmd, sspi->tx, t->len);
        if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
                cmd = cpu_to_be32(cmd) >>
@@ -438,7 +440,8 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
                        sspi->tx_word(sspi);
                writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
                        SIRFSOC_SPI_TX_UFLOW_INT_EN |
-                       SIRFSOC_SPI_RX_OFLOW_INT_EN,
+                       SIRFSOC_SPI_RX_OFLOW_INT_EN |
+                       SIRFSOC_SPI_RX_IO_DMA_INT_EN,
                        sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
                        sspi->base + SIRFSOC_SPI_TX_RX_EN);
index 19396dc4ee47f947f569a26308416f99401acb21..bed2fedeb05710f8d17313ee49e5a6e00ce7f471 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) },
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
index e7b2e02341962426ea6e07a351d9932a01739f9d..69139ce7420d65bb751c2691e954fd1299bd1262 100644 (file)
@@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
        fence->num_fences = 1;
        atomic_set(&fence->status, 1);
 
-       fence_get(&pt->base);
        fence->cbs[0].sync_pt = &pt->base;
        fence->cbs[0].fence = fence;
        if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
index ea01b8f7a2c31d6ecbddc90637f99477be5df1f3..6f45ce0478d760bd80aee3f2551ee12a7af2be76 100644 (file)
@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
        if (ret)
                goto error_free_irq;
 
index 7e3f019d7e72543c40dd11299a051383a1a57224..4662e00b456a25d4c08b755bc68b6bdbf37e8ed2 100644 (file)
@@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
        for (i = 0; i < 2; i++) {
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
+               if (!channel->connector.funcs)
+                       continue;
+
                channel->connector.funcs->destroy(&channel->connector);
                channel->encoder.funcs->destroy(&channel->encoder);
        }
index 6f393a11f44d2d57fa074defc77ce048796b36cc..50de10a550e970f5c1710653eb1449215a67e3e0 100644 (file)
@@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
 
                ipu_idmac_put(ipu_plane->ipu_ch);
                ipu_dmfc_put(ipu_plane->dmfc);
-               ipu_dp_put(ipu_plane->dp);
+               if (ipu_plane->dp)
+                       ipu_dp_put(ipu_plane->dp);
        }
 }
 
index 0367f5a2cfe4b0760d6dfc75e53d5257399c1ca7..0c59e26c080583f85df3bf21194e777bdab7a079 100644 (file)
@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        if (sb->s_root == NULL) {
                CERROR("%s: can't make root dentry\n",
                        ll_get_fsname(sb, NULL, 0));
-               GOTO(out_root, err = -ENOMEM);
+               GOTO(out_lock_cn_cb, err = -ENOMEM);
        }
 
        sbi->ll_sdev_orig = sb->s_dev;
index f105c2ac091b7eaf80a99fa75cf26fee0c5543f3..164136b07a68c8243066e280dbb1293d0e1c819d 100644 (file)
@@ -350,6 +350,9 @@ static int hostap_set_generic_element(PSDevice pDevice,
 {
        PSMgmtObject    pMgmt = pDevice->pMgmt;
 
+       if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
+               return -EINVAL;
+
        memcpy(pMgmt->abyWPAIE,
               param->u.generic_elem.data,
               param->u.generic_elem.len
index 1f4c794f5fcc27b26ad4260e52ba8a78548fd779..260c3e1e312c66310917238cada772696e457b6a 100644 (file)
@@ -4540,6 +4540,7 @@ static void iscsit_logout_post_handler_diffcid(
 {
        struct iscsi_conn *l_conn;
        struct iscsi_session *sess = conn->sess;
+       bool conn_found = false;
 
        if (!sess)
                return;
@@ -4548,12 +4549,13 @@ static void iscsit_logout_post_handler_diffcid(
        list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
                if (l_conn->cid == cid) {
                        iscsit_inc_conn_usage_count(l_conn);
+                       conn_found = true;
                        break;
                }
        }
        spin_unlock_bh(&sess->conn_lock);
 
-       if (!l_conn)
+       if (!conn_found)
                return;
 
        if (l_conn->sock)
index 02f9de26f38ab930b9810a20abaf8b972f42f93d..18c29260b4a213d959463b41083abe6555166985 100644 (file)
@@ -601,7 +601,7 @@ int iscsi_copy_param_list(
        param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
        if (!param_list) {
                pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-               goto err_out;
+               return -1;
        }
        INIT_LIST_HEAD(&param_list->param_list);
        INIT_LIST_HEAD(&param_list->extra_response_list);
index fd90b28f1d94358db7361899b32928b0d1b5fee2..73355f4fca745a71d0fa3cb66956148c64e41f11 100644 (file)
@@ -400,6 +400,8 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+               if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
+                       continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
index bf55c5a04cfac68983b2c565c9f9498f6aaad1ed..756def38c77af934a3e16ef3ae9219e67ceb06cc 100644 (file)
@@ -2363,7 +2363,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
                pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
                return -EINVAL;                                         \
        }                                                               \
-       if (!tmp)                                                       \
+       if (tmp)                                                        \
                t->_var |= _bit;                                        \
        else                                                            \
                t->_var &= ~_bit;                                       \
index 6cd7222738fc4697f8c1be7c337ef7418fd09075..bc286a67af7c4e876e0ac6075e8af8fffe2b9c92 100644 (file)
@@ -664,7 +664,7 @@ spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
        buf[0] = dev->transport->get_device_type(dev);
        buf[3] = 0x0c;
        put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
-       put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+       put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
 
        return 0;
 }
index 4db7987ec225f4d345efbb3249f0005897f95b2e..57d9df84ce5d7aca4fe25c557988742fbbb986bc 100644 (file)
@@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT3434", 0 },
        { "INT3435", 0 },
        { "80860F0A", 0 },
+       { "8086228A", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 7b63677475c1f0cc57f9a5f947d74d66cf9aad81..d7d4584549a5d77e84b02f567085bcc77da5526a 100644 (file)
@@ -526,6 +526,45 @@ static void atmel_enable_ms(struct uart_port *port)
        UART_PUT_IER(port, ier);
 }
 
+/*
+ * Disable modem status interrupts
+ */
+static void atmel_disable_ms(struct uart_port *port)
+{
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       uint32_t idr = 0;
+
+       /*
+        * Interrupt should not be disabled twice
+        */
+       if (!atmel_port->ms_irq_enabled)
+               return;
+
+       atmel_port->ms_irq_enabled = false;
+
+       if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
+       else
+               idr |= ATMEL_US_CTSIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
+       else
+               idr |= ATMEL_US_DSRIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
+       else
+               idr |= ATMEL_US_RIIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
+       else
+               idr |= ATMEL_US_DCDIC;
+
+       UART_PUT_IDR(port, idr);
+}
+
 /*
  * Control the transmission of a break signal
  */
@@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        /* CTS flow-control and modem-status interrupts */
        if (UART_ENABLE_MS(port, termios->c_cflag))
-               port->ops->enable_ms(port);
+               atmel_enable_ms(port);
+       else
+               atmel_disable_ms(port);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
index 01951d27cc03c8a4801dd66083c9a90c079b0332..806e4bcadbd7ee6d2d7e543798fb8a621b4e21cb 100644 (file)
@@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
 {
        unsigned int status;
 
-       status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY;
+       status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY;
        return status ? TIOCSER_TEMT : 0;
 }
 
index d72b9d2de2c5f828be33ea965c6ad87617722859..4935ac38fd008d17a51ed866e9fc002cddf021f5 100644 (file)
 static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
 {
        struct device *dev = ci->gadget.dev.parent;
-       int val;
 
        switch (event) {
        case CI_HDRC_CONTROLLER_RESET_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
                writel(0, USB_AHBBURST);
                writel(0, USB_AHBMODE);
+               usb_phy_init(ci->transceiver);
                break;
        case CI_HDRC_CONTROLLER_STOPPED_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
                 * Put the transceiver in non-driving mode. Otherwise host
                 * may not detect soft-disconnection.
                 */
-               val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
-               val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-               val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-               usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
+               usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
                break;
        default:
                dev_dbg(dev, "unknown ci_hdrc event\n");
index 46f5161c78913c75678ae3416f4031099b880090..d481c99a20d7b13adc9b9ec5c00565fd7d0ecc60 100644 (file)
@@ -5024,9 +5024,10 @@ static void hub_events(void)
 
                hub = list_entry(tmp, struct usb_hub, event_list);
                kref_get(&hub->kref);
+               hdev = hub->hdev;
+               usb_get_dev(hdev);
                spin_unlock_irq(&hub_event_lock);
 
-               hdev = hub->hdev;
                hub_dev = hub->intfdev;
                intf = to_usb_interface(hub_dev);
                dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
@@ -5139,6 +5140,7 @@ static void hub_events(void)
                usb_autopm_put_interface(intf);
  loop_disconnected:
                usb_unlock_device(hdev);
+               usb_put_dev(hdev);
                kref_put(&hub->kref, hub_release);
 
        } /* end while (1) */
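
The hub_events() change takes a reference on the hub's usb_device while hub_event_lock is still held and drops it only after the event has been fully handled, so the device cannot disappear in between. A generic sketch of that grab-under-lock / release-after-use shape using kref; all names are illustrative:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
        struct kref kref;
        struct list_head node;
};

static LIST_HEAD(demo_pending);
static DEFINE_SPINLOCK(demo_lock);

static void demo_item_release(struct kref *kref)
{
        kfree(container_of(kref, struct demo_item, kref));
}

static void demo_handle_first(void)
{
        struct demo_item *item;

        spin_lock_irq(&demo_lock);
        if (list_empty(&demo_pending)) {
                spin_unlock_irq(&demo_lock);
                return;
        }
        item = list_first_entry(&demo_pending, struct demo_item, node);
        kref_get(&item->kref);          /* pin it before dropping the lock */
        spin_unlock_irq(&demo_lock);

        /* ... long-running work on item with no lock held ... */

        kref_put(&item->kref, demo_item_release);       /* drop our pin */
}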
index 7c9618e916e23cd7eca1cc6ae098edf999a4aa06..ce6071d65d510696ebb4d428cd7240dd51a95b3e 100644 (file)
@@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
                        dev_err(hsotg->dev,
                                "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
                                __func__, val);
+                       break;
                }
 
                udelay(1);
@@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
 
        dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
 
-       if (hsotg->phy) {
-               phy_init(hsotg->phy);
-               phy_power_on(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_init(hsotg->uphy);
-       else if (hsotg->plat->phy_init)
+       else if (hsotg->plat && hsotg->plat->phy_init)
                hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
+       else {
+               phy_init(hsotg->phy);
+               phy_power_on(hsotg->phy);
+       }
 }
 
 /**
@@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
 {
        struct platform_device *pdev = to_platform_device(hsotg->dev);
 
-       if (hsotg->phy) {
-               phy_power_off(hsotg->phy);
-               phy_exit(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_shutdown(hsotg->uphy);
-       else if (hsotg->plat->phy_exit)
+       else if (hsotg->plat && hsotg->plat->phy_exit)
                hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
+       else {
+               phy_power_off(hsotg->phy);
+               phy_exit(hsotg->phy);
+       }
 }
 
 /**
@@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
                return -ENODEV;
 
        /* all endpoints should be shutdown */
-       for (ep = 0; ep < hsotg->num_of_eps; ep++)
+       for (ep = 1; ep < hsotg->num_of_eps; ep++)
                s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
 
        spin_lock_irqsave(&hsotg->lock, flags);
 
-       s3c_hsotg_phy_disable(hsotg);
-
        if (!driver)
                hsotg->driver = NULL;
 
@@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
                s3c_hsotg_phy_enable(hsotg);
                s3c_hsotg_core_init(hsotg);
        } else {
-               s3c_hsotg_disconnect(hsotg);
                s3c_hsotg_phy_disable(hsotg);
        }
 
@@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
 
        hsotg->irq = ret;
 
-       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
-                               dev_name(dev), hsotg);
-       if (ret < 0) {
-               dev_err(dev, "cannot claim IRQ\n");
-               goto err_clk;
-       }
-
        dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
 
        hsotg->gadget.max_speed = USB_SPEED_HIGH;
@@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        if (hsotg->phy && (phy_get_bus_width(phy) == 8))
                hsotg->phyif = GUSBCFG_PHYIF8;
 
-       if (hsotg->phy)
-               phy_init(hsotg->phy);
-
        /* usb phy enable */
        s3c_hsotg_phy_enable(hsotg);
 
@@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        s3c_hsotg_init(hsotg);
        s3c_hsotg_hw_cfg(hsotg);
 
+       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
+                               dev_name(dev), hsotg);
+       if (ret < 0) {
+               s3c_hsotg_phy_disable(hsotg);
+               clk_disable_unprepare(hsotg->clk);
+               regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+                                      hsotg->supplies);
+               dev_err(dev, "cannot claim IRQ\n");
+               goto err_clk;
+       }
+
        /* hsotg->num_of_eps holds number of EPs other than ep0 */
 
        if (hsotg->num_of_eps == 0) {
@@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
                usb_gadget_unregister_driver(hsotg->driver);
        }
 
-       s3c_hsotg_phy_disable(hsotg);
-       if (hsotg->phy)
-               phy_exit(hsotg->phy);
        clk_disable_unprepare(hsotg->clk);
 
        return 0;
index b769c1faaf0385dff2543592e2fe68745d1fc7c5..9069984fe5cf0eae313a0dfb5faa2fe06ba9766c 100644 (file)
@@ -799,20 +799,21 @@ static int dwc3_remove(struct platform_device *pdev)
 {
        struct dwc3     *dwc = platform_get_drvdata(pdev);
 
+       dwc3_debugfs_exit(dwc);
+       dwc3_core_exit_mode(dwc);
+       dwc3_event_buffers_cleanup(dwc);
+       dwc3_free_event_buffers(dwc);
+
        usb_phy_set_suspend(dwc->usb2_phy, 1);
        usb_phy_set_suspend(dwc->usb3_phy, 1);
        phy_power_off(dwc->usb2_generic_phy);
        phy_power_off(dwc->usb3_generic_phy);
 
+       dwc3_core_exit(dwc);
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       dwc3_debugfs_exit(dwc);
-       dwc3_core_exit_mode(dwc);
-       dwc3_event_buffers_cleanup(dwc);
-       dwc3_free_event_buffers(dwc);
-       dwc3_core_exit(dwc);
-
        return 0;
 }
 
index 9dcfbe7cd5f5d5cfc58239ee578cd68cae09a63f..fc0de3753648187ab9f1fd68ac121a83cd808e91 100644 (file)
@@ -576,9 +576,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
        if (omap->extcon_id_dev.edev)
                extcon_unregister_interest(&omap->extcon_id_dev);
        dwc3_omap_disable_irqs(omap);
+       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
 
        return 0;
 }
index 349cacc577d81a679cacd4494bb7ead82f11cfa4..490a6ca0073369844b40eff51f88b2cbfd353bce 100644 (file)
@@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
                dep->stream_capable = true;
        }
 
-       if (usb_endpoint_xfer_isoc(desc))
+       if (!usb_endpoint_xfer_control(desc))
                params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
 
        /*
@@ -1225,16 +1225,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
 
        int                             ret;
 
+       spin_lock_irqsave(&dwc->lock, flags);
        if (!dep->endpoint.desc) {
                dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
                                request, ep->name);
+               spin_unlock_irqrestore(&dwc->lock, flags);
                return -ESHUTDOWN;
        }
 
        dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
                        request, ep->name, request->length);
 
-       spin_lock_irqsave(&dwc->lock, flags);
        ret = __dwc3_gadget_ep_queue(dep, req);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -2041,12 +2042,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERINPROGRESS:
-               if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-                       dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
-                                       dep->name);
-                       return;
-               }
-
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERNOTREADY:
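
In the ep_queue hunk the dep->endpoint.desc test now happens under dwc->lock, so a concurrent endpoint disable cannot clear the descriptor between the check and the actual queueing. A tiny sketch of check-and-act under one lock, with placeholder fields:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ep {
        spinlock_t lock;
        bool enabled;
        unsigned int queued;
};

static int demo_ep_queue(struct demo_ep *ep)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&ep->lock, flags);
        if (!ep->enabled)               /* state is checked under the lock ... */
                ret = -ESHUTDOWN;
        else
                ep->queued++;           /* ... so disable cannot race with this */
        spin_unlock_irqrestore(&ep->lock, flags);

        return ret;
}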
index dc30adf15a01d22cbde3c3a195a146373e302ef0..0dc3552d13603282d3d631aa1a7d06078bec0c3b 100644 (file)
@@ -155,6 +155,12 @@ struct ffs_io_data {
        struct usb_request *req;
 };
 
+struct ffs_desc_helper {
+       struct ffs_data *ffs;
+       unsigned interfaces_count;
+       unsigned eps_count;
+};
+
 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
 
@@ -1830,7 +1836,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                                u8 *valuep, struct usb_descriptor_header *desc,
                                void *priv)
 {
-       struct ffs_data *ffs = priv;
+       struct ffs_desc_helper *helper = priv;
+       struct usb_endpoint_descriptor *d;
 
        ENTER();
 
@@ -1844,8 +1851,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * encountered interface "n" then there are at least
                 * "n+1" interfaces.
                 */
-               if (*valuep >= ffs->interfaces_count)
-                       ffs->interfaces_count = *valuep + 1;
+               if (*valuep >= helper->interfaces_count)
+                       helper->interfaces_count = *valuep + 1;
                break;
 
        case FFS_STRING:
@@ -1853,14 +1860,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * Strings are indexed from 1 (0 is magic ;) reserved
                 * for languages list or some such)
                 */
-               if (*valuep > ffs->strings_count)
-                       ffs->strings_count = *valuep;
+               if (*valuep > helper->ffs->strings_count)
+                       helper->ffs->strings_count = *valuep;
                break;
 
        case FFS_ENDPOINT:
-               /* Endpoints are indexed from 1 as well. */
-               if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
-                       ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+               d = (void *)desc;
+               helper->eps_count++;
+               if (helper->eps_count >= 15)
+                       return -EINVAL;
+               /* Check if descriptors for any speed were already parsed */
+               if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
+                       helper->ffs->eps_addrmap[helper->eps_count] =
+                               d->bEndpointAddress;
+               else if (helper->ffs->eps_addrmap[helper->eps_count] !=
+                               d->bEndpointAddress)
+                       return -EINVAL;
                break;
        }
 
@@ -2053,6 +2068,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
        char *data = _data, *raw_descs;
        unsigned os_descs_count = 0, counts[3], flags;
        int ret = -EINVAL, i;
+       struct ffs_desc_helper helper;
 
        ENTER();
 
@@ -2101,13 +2117,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
 
        /* Read descriptors */
        raw_descs = data;
+       helper.ffs = ffs;
        for (i = 0; i < 3; ++i) {
                if (!counts[i])
                        continue;
+               helper.interfaces_count = 0;
+               helper.eps_count = 0;
                ret = ffs_do_descs(counts[i], data, len,
-                                  __ffs_data_do_entity, ffs);
+                                  __ffs_data_do_entity, &helper);
                if (ret < 0)
                        goto error;
+               if (!ffs->eps_count && !ffs->interfaces_count) {
+                       ffs->eps_count = helper.eps_count;
+                       ffs->interfaces_count = helper.interfaces_count;
+               } else {
+                       if (ffs->eps_count != helper.eps_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+                       if (ffs->interfaces_count != helper.interfaces_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+               }
                data += ret;
                len  -= ret;
        }
@@ -2342,9 +2374,18 @@ static void ffs_event_add(struct ffs_data *ffs,
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 }
 
-
 /* Bind/unbind USB function hooks *******************************************/
 
+static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
+               if (ffs->eps_addrmap[i] == endpoint_address)
+                       return i;
+       return -ENOENT;
+}
+
 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
                                    struct usb_descriptor_header *desc,
                                    void *priv)
@@ -2378,7 +2419,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
        if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return 0;
 
-       idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+       idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
+       if (idx < 0)
+               return idx;
+
        ffs_ep = func->eps + idx;
 
        if (unlikely(ffs_ep->descs[ep_desc_id])) {
index 63d6e71569c18d7ddcf26e92586798d30a97cbd2..d48897e8ffebf03db2f39ec77ba695364b4174a2 100644 (file)
@@ -224,6 +224,8 @@ struct ffs_data {
        void                            *ms_os_descs_ext_prop_name_avail;
        void                            *ms_os_descs_ext_prop_data_avail;
 
+       u8                              eps_addrmap[15];
+
        unsigned short                  strings_count;
        unsigned short                  interfaces_count;
        unsigned short                  eps_count;
index ae811d8d38b435d9cd198369b399b1cf58bda63c..ad39f892d20099a5047fc5a815fd9d33a03b1e65 100644 (file)
@@ -12,7 +12,7 @@
 
 
 #ifndef __FUSB300_UDC_H__
-#define __FUSB300_UDC_H_
+#define __FUSB300_UDC_H__
 
 #include <linux/kernel.h>
 
index f4eac113690ebd2e164e4768113aec6319b68290..2e95715b50c0967f02550dbe11e88c159dbd5b5c 100644 (file)
@@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
                if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
-                               (readl(&dev->usb->usbstat) & mask)) ||
+                               ((readl(&dev->usb->usbstat) & mask) == 0)) ||
                                ((readl(&dev->usb->usbctl) &
                                        BIT(VBUS_PIN)) == 0)) &&
                                (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
index 81cda09b47e3127772e51cbc5c57eab7761c4a52..488a30836c36cbee57f2e16f7813db7075891699 100644 (file)
@@ -965,8 +965,6 @@ rescan:
        }
 
        qh->exception = 1;
-       if (ehci->rh_state < EHCI_RH_RUNNING)
-               qh->qh_state = QH_STATE_IDLE;
        switch (qh->qh_state) {
        case QH_STATE_LINKED:
                WARN_ON(!list_empty(&qh->qtd_list));
index aa79e8749040b783281ec6f968620fc41ad41876..69aece31143a12d965af1014b0c133f773ffafcf 100644 (file)
@@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
 }
 
 /* Updates Link Status for super Speed port */
-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+               u32 *status, u32 status_reg)
 {
        u32 pls = status_reg & PORT_PLS_MASK;
 
@@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
                 * in which sometimes the port enters compliance mode
                 * caused by a delay on the host-device negotiation.
                 */
-               if (pls == USB_SS_PORT_LS_COMP_MOD)
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               (pls == USB_SS_PORT_LS_COMP_MOD))
                        pls |= USB_PORT_STAT_CONNECTION;
        }
 
@@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
        }
        /* Update Port Link State */
        if (hcd->speed == HCD_USB3) {
-               xhci_hub_report_usb3_link_state(&status, raw_port_status);
+               xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
                /*
                 * Verify if all USB3 Ports Have entered U0 already.
                 * Delete Compliance Mode Timer if so.
index 8056d90690ee1bff397b0fa689f3ab9175622a6b..8936211b161d3ed6b0a2989359707287f2721b21 100644 (file)
@@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        if (xhci->lpm_command)
                xhci_free_command(xhci, xhci->lpm_command);
+       xhci->lpm_command = NULL;
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
@@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci_cleanup_command_queue(xhci);
 
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-       for (i = 0; i < num_ports; i++) {
+       for (i = 0; i < num_ports && xhci->rh_bw; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
                        struct list_head *ep = &bwt->interval_bw[j].endpoints;
index c020b094fe7d952d91f1f977a02d35de52dff451..c4a8fca8ae939f4fefeca3498bf967ce7aabcb9f 100644 (file)
@@ -3971,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
        int ret;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /*
+        * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
+        * xHC was re-initialized. Exit latency will be set later after
+        * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
+        */
+
+       if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return 0;
        }
 
        /* Attempt to issue an Evaluate Context command to change the MEL. */
-       virt_dev = xhci->devs[udev->slot_id];
        command = xhci->lpm_command;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
        if (!ctrl_ctx) {
index 47ae6455d0733c2d3ef4093aa4408e98969b3644..3ee133f675abee3e9c6921c4fa034855f57efba8 100644 (file)
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
        u32 transferred;
        u32 packet_sz;
        struct list_head tx_check;
+       int tx_zlp;
 };
 
 #define MUSB_DMA_NUM_CHANNELS 15
@@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
 {
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
+       void __iomem *epio = hw_ep->regs;
+       u16 csr;
 
        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
@@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;
+
+               /*
+                * transmit a ZLP using PIO mode for transfers whose size is
+                * a multiple of the EP packet size.
+                */
+               if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
+                                       cppi41_channel->packet_sz) == 0) {
+                       musb_ep_select(musb->mregs, hw_ep->epnum);
+                       csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
+                       musb_writew(epio, MUSB_TXCSR, csr);
+               }
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
-               u16 csr;
                u32 remain_bytes;
-               void __iomem *epio = cppi41_channel->hw_ep->regs;
 
                cppi41_channel->buf_addr += cppi41_channel->packet_sz;
 
@@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
+       cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
 
        /*
         * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
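
The cppi41 hunk records whether a TX transfer may need a terminating zero-length packet and sends it via PIO (a TXCSR write) when the DMA-completed length is an exact multiple of the endpoint packet size. The condition itself, as an illustrative helper:

#include <linux/types.h>

static bool demo_needs_zlp(bool zlp_requested, u32 transferred, u32 packet_sz)
{
        /* an exact multiple of the packet size never yields a short
         * packet, so an explicit zero-length packet must follow */
        return zlp_requested && packet_sz && (transferred % packet_sz) == 0;
}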
index c42bdf0c4a1f7322eed6ba4f7ce6b058f7c16fd9..00972eca04e7f5d4965f32fd0944c453f5291f07 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
  * Copyright (C) 2012 Marek Vasut <marex@denx.de>
  * on behalf of DENX Software Engineering GmbH
  *
@@ -125,7 +125,13 @@ static const struct mxs_phy_data imx6sl_phy_data = {
                MXS_PHY_NEED_IP_FIX,
 };
 
+static const struct mxs_phy_data imx6sx_phy_data = {
+       .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+               MXS_PHY_NEED_IP_FIX,
+};
+
 static const struct of_device_id mxs_phy_dt_ids[] = {
+       { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
        { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
        { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
        { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
index 13b4fa287da8aff05739a657fb9c24b3f5541ea7..886f1807a67bbdcb8fe5e17e91b45e382507db14 100644 (file)
@@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
                return -ENOMEM;
        }
 
-       tegra_phy->config = devm_kzalloc(&pdev->dev,
-               sizeof(*tegra_phy->config), GFP_KERNEL);
+       tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
+                                        GFP_KERNEL);
        if (!tegra_phy->config) {
                dev_err(&pdev->dev,
                        "unable to allocate memory for USB UTMIP config\n");
index 4fd36530bfa35dfb9a83b89f7d02abd04b17fbea..b0c97a3f1bfed74e262c3aa818817259f31e987e 100644 (file)
@@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
        return list_first_entry(&pipe->list, struct usbhs_pkt, node);
 }
 
+static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+                             struct usbhs_fifo *fifo);
+static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
+                                struct usbhs_fifo *fifo);
+static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+                                           struct usbhs_pkt *pkt);
+#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
+#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
+static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
        unsigned long flags;
 
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       usbhs_pipe_disable(pipe);
+
        if (!pkt)
                pkt = __usbhsf_pkt_get(pipe);
 
-       if (pkt)
+       if (pkt) {
+               struct dma_chan *chan = NULL;
+
+               if (fifo)
+                       chan = usbhsf_dma_chan_get(fifo, pkt);
+               if (chan) {
+                       dmaengine_terminate_all(chan);
+                       usbhsf_fifo_clear(pipe, fifo);
+                       usbhsf_dma_unmap(pkt);
+               }
+
                __usbhsf_pkt_del(pkt);
+       }
+
+       if (fifo)
+               usbhsf_fifo_unselect(pipe, fifo);
 
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
@@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
                usbhsf_send_terminator(pipe, fifo);
 
        usbhsf_tx_irq_ctrl(pipe, !*is_done);
+       usbhs_pipe_running(pipe, !*is_done);
        usbhs_pipe_enable(pipe);
 
        dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
@@ -570,12 +597,21 @@ usbhs_fifo_write_busy:
         * retry in interrupt
         */
        usbhsf_tx_irq_ctrl(pipe, 1);
+       usbhs_pipe_running(pipe, 1);
 
        return ret;
 }
 
+static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+{
+       if (usbhs_pipe_is_running(pkt->pipe))
+               return 0;
+
+       return usbhsf_pio_try_push(pkt, is_done);
+}
+
 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
-       .prepare = usbhsf_pio_try_push,
+       .prepare = usbhsf_pio_prepare_push,
        .try_run = usbhsf_pio_try_push,
 };
 
@@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
        if (usbhs_pipe_is_busy(pipe))
                return 0;
 
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /*
         * pipe enable to prepare packet receive
         */
@@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
+       usbhs_pipe_running(pipe, 1);
        usbhsf_rx_irq_ctrl(pipe, 1);
 
        return 0;
@@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
            (total_len < maxp)) {               /* short packet */
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
+               usbhs_pipe_running(pipe, 0);
                usbhs_pipe_disable(pipe);       /* disable pipe first */
        }
 
@@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
        usbhs_bset(priv, fifo->sel, DREQE, dreqe);
 }
 
-#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
-#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
@@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work)
        dev_dbg(dev, "  %s %d (%d/ %d)\n",
                fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
 
+       usbhs_pipe_running(pipe, 1);
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        usbhs_pipe_enable(pipe);
        usbhsf_dma_start(pipe, fifo);
@@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;
 
+       /* return at this time if the pipe is running */
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /* get enable DMA fifo */
        fifo = usbhsf_get_dma_fifo(priv, pkt);
        if (!fifo)
@@ -869,15 +913,29 @@ usbhsf_pio_prepare_push:
 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
+       int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
+
+       pkt->actual += pkt->trans;
 
-       pkt->actual = pkt->trans;
+       if (pkt->actual < pkt->length)
+               *is_done = 0;           /* there are remainder data */
+       else if (is_short)
+               *is_done = 1;           /* short packet */
+       else
+               *is_done = !pkt->zero;  /* send zero packet? */
 
-       *is_done = !pkt->zero;  /* send zero packet ? */
+       usbhs_pipe_running(pipe, !*is_done);
 
        usbhsf_dma_stop(pipe, pipe->fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);
 
+       if (!*is_done) {
+               /* change handler to PIO */
+               pkt->handler = &usbhs_fifo_pio_push_handler;
+               return pkt->handler->try_run(pkt, is_done);
+       }
+
        return 0;
 }
 
@@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
        if ((pkt->actual == pkt->length) ||     /* receive all data */
            (pkt->trans < maxp)) {              /* short packet */
                *is_done = 1;
+               usbhs_pipe_running(pipe, 0);
        } else {
                /* re-enable */
+               usbhs_pipe_running(pipe, 0);
                usbhsf_prepare_pop(pkt, is_done);
        }
 
index 6a030b931a3b71d721870181d1170ba451dacc8b..9a705b15b3a1528b733c9eebac04e0d2f7ef2ee9 100644 (file)
@@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
 {
        struct usbhs_mod *mod = usbhs_mod_get_current(priv);
        u16 intenb0, intenb1;
+       unsigned long flags;
 
+       /********************  spin lock ********************/
+       usbhs_lock(priv, flags);
        state->intsts0 = usbhs_read(priv, INTSTS0);
        state->intsts1 = usbhs_read(priv, INTSTS1);
 
@@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
                state->bempsts &= mod->irq_bempsts;
                state->brdysts &= mod->irq_brdysts;
        }
+       usbhs_unlock(priv, flags);
+       /********************  spin unlock ******************/
 
        /*
         * Check whether the irq enable registers and the irq status are set
index 75fbcf6b102e86f8678434bd75188e71005d2504..040bcefcb0402c80116b917ea6595e1d01ed0a4d 100644 (file)
@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
        return usbhsp_flags_has(pipe, IS_DIR_HOST);
 }
 
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
+{
+       return usbhsp_flags_has(pipe, IS_RUNNING);
+}
+
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
+{
+       if (running)
+               usbhsp_flags_set(pipe, IS_RUNNING);
+       else
+               usbhsp_flags_clr(pipe, IS_RUNNING);
+}
+
 void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
 {
        u16 mask = (SQCLR | SQSET);
index 406f36d050e4facf2c24b04fc254e2cbd639c623..d24a059723704cbae9c09c501f580c3ff0a450b7 100644 (file)
@@ -36,6 +36,7 @@ struct usbhs_pipe {
 #define USBHS_PIPE_FLAGS_IS_USED               (1 << 0)
 #define USBHS_PIPE_FLAGS_IS_DIR_IN             (1 << 1)
 #define USBHS_PIPE_FLAGS_IS_DIR_HOST           (1 << 2)
+#define USBHS_PIPE_FLAGS_IS_RUNNING            (1 << 3)
 
        struct usbhs_pkt_handle *handler;
 
@@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
 void usbhs_pipe_remove(struct usbhs_priv *priv);
 int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
 int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
+
 void usbhs_pipe_init(struct usbhs_priv *priv,
                     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
 int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
index 824ea5e7ec8b7b1b67a7820e6fbb691197aee030..dc72b924c399e955e8cd94f68c8337f1b7e3facd 100644 (file)
@@ -728,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+       { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -939,6 +940,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
        { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+       /* GE Healthcare devices */
+       { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
        { }                                     /* Terminating entry */
 };
 
index 70b0b1d88ae9b383fde7f609015a9f0592797934..5937b2d242f28c14064d56b073dbbda5fb737093 100644 (file)
 #define TELLDUS_VID                    0x1781  /* Vendor ID */
 #define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
 
+/*
+ * NOVITUS printers
+ */
+#define NOVITUS_VID                    0x1a28
+#define NOVITUS_BONO_E_PID             0x6010
+
 /*
  * RT Systems programming cables for various ham radios
  */
  * ekey biometric systems GmbH (http://ekey.net/)
  */
 #define FTDI_EKEY_CONV_USB_PID         0xCB08  /* Converter USB */
+
+/*
+ * GE Healthcare devices
+ */
+#define GE_HEALTHCARE_VID              0x1901
+#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
index 6f7f01eb556a3af29634ce6e8a3e0745d6815e2e..46179a0828ebcbad9a78c11dad8044edff27664a 100644 (file)
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
        /* Sierra Wireless HSPA Non-Composite Device */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
        { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
-       { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
+       /* Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
-       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+       /* Airprime/Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
 
index 1a132e9e947ac44ae7ac31554e39078f3cc122ca..c9bb107d5e5ccae2ffaa7823f70872c0de86d419 100644 (file)
@@ -272,6 +272,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x19d2, 0xffec) },
+       { USB_DEVICE(0x19d2, 0xffee) },
+       { USB_DEVICE(0x19d2, 0xfff6) },
+       { USB_DEVICE(0x19d2, 0xfff7) },
+       { USB_DEVICE(0x19d2, 0xfff8) },
+       { USB_DEVICE(0x19d2, 0xfff9) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
        /* MG880 */
        { USB_DEVICE(0x19d2, 0xfffd) },
        { },
index 503ac5c8d80f7bb7bcb87832df78a71d8c14587f..8a6f371ed6e77e3ccdc99632c3cd41ebeb155213 100644 (file)
@@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        unsigned long flags = id->driver_info;
        int r, alt;
 
-       usb_stor_adjust_quirks(udev, &flags);
-
-       if (flags & US_FL_IGNORE_UAS)
-               return 0;
 
        alt = uas_find_uas_alt_setting(intf);
        if (alt < 0)
@@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        if (r < 0)
                return 0;
 
+       /*
+        * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
+        * broken on the ASM1051, use the number of streams to differentiate.
+        * New ASM1053-s also support 32 streams, but have a different prod-id.
+        */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
+                       le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
+               if (udev->speed < USB_SPEED_SUPER) {
+                       /* No streams info, assume ASM1051 */
+                       flags |= US_FL_IGNORE_UAS;
+               } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+                       flags |= US_FL_IGNORE_UAS;
+               }
+       }
+
+       usb_stor_adjust_quirks(udev, &flags);
+
+       if (flags & US_FL_IGNORE_UAS) {
+               dev_warn(&udev->dev,
+                       "UAS is blacklisted for this device, using usb-storage instead\n");
+               return 0;
+       }
+
        if (udev->bus->sg_tablesize == 0) {
                dev_warn(&udev->dev,
                        "The driver for the USB controller %s does not support scatter-gather which is\n",
index 3f42785f653c09585d5e6a0461bbb03044b7677d..9bfa7252f7f914ec666706fd80f4bd932f5a74d5 100644 (file)
@@ -970,6 +970,13 @@ static struct scsi_host_template uas_host_template = {
        .cmd_per_lun = 1,       /* until we override it */
        .skip_settle_delay = 1,
        .ordered_tag = 1,
+
+       /*
+        * The uas driver expects tags not to be bigger than the maximum
+        * per-device queue depth, which is not true with the blk-mq tag
+        * allocator.
+        */
+       .disable_blk_mq = true,
 };
 
 #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
index 7ef99b2f3aaf75ff3222db35292ca3edaa7ac607..4a5c68a47e46f90a53c25a299d90a93170ffd825 100644 (file)
@@ -101,6 +101,12 @@ UNUSUAL_DEV(  0x03f0, 0x4002, 0x0001, 0x0001,
                "PhotoSmart R707",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
 
+UNUSUAL_DEV(  0x03f3, 0x0001, 0x0000, 0x9999,
+               "Adaptec",
+               "USBConnect 2000",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
  * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
  * for USB floppies that need the SINGLE_LUN enforcement.
@@ -741,6 +747,12 @@ UNUSUAL_DEV(  0x059b, 0x0001, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_SINGLE_LUN ),
 
+UNUSUAL_DEV(  0x059b, 0x0040, 0x0100, 0x0100,
+               "Iomega",
+               "Jaz USB Adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by <Hendryk.Pfeiffer@gmx.de> */
 UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
                "LaCie",
@@ -1119,6 +1131,18 @@ UNUSUAL_DEV(  0x0851, 0x1543, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NOT_LOCKABLE),
 
+UNUSUAL_DEV(  0x085a, 0x0026, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB-SCSI (Mac USB Dock)",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
+UNUSUAL_DEV(  0x085a, 0x0028, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Submitted by Jan De Luyck <lkml@kcore.org> */
 UNUSUAL_DEV(  0x08bd, 0x1100, 0x0000, 0x0000,
                "CITIZEN",
@@ -1958,6 +1982,14 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
+ * and Mac USB Dock USB-SCSI */
+UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
+               "Entrega Technologies",
+               "USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Robert Schedel <r.schedel@yahoo.de>
  * Note: this is a 'super top' device like the above 14cd/6600 device */
 UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
@@ -1980,6 +2012,12 @@ UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
 
+UNUSUAL_DEV(  0x1822, 0x0001, 0x0000, 0x9999,
+               "Ariston Technologies",
+               "iConnect USB to SCSI adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Hans de Goede <hdegoede@redhat.com>
  * These Appotech controllers are found in Picture Frames, they provide a
  * (buggy) emulation of a cdrom drive which contains the windows software
index 80079b8fed155c39f109bd65d12e4e5e155a1b3c..d0303f0dbe15348d7eee118c74a94e0fbfbd6f45 100644 (file)
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        uwb_dev->mac_addr = *bce->mac_addr;
        uwb_dev->dev_addr = bce->dev_addr;
        dev_set_name(&uwb_dev->dev, "%s", macbuf);
+
+       /* plug the beacon cache */
+       bce->uwb_dev = uwb_dev;
+       uwb_dev->bce = bce;
+       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
        if (result < 0) {
                dev_err(dev, "new device %s: cannot instantiate device\n",
                        macbuf);
                goto error_dev_add;
        }
-       /* plug the beacon cache */
-       bce->uwb_dev = uwb_dev;
-       uwb_dev->bce = bce;
-       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
                 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
                 dev_name(rc->uwb_dev.dev.parent));
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        return;
 
 error_dev_add:
+       bce->uwb_dev = NULL;
+       uwb_bce_put(bce);
        kfree(uwb_dev);
        return;
 }
index a7b6217ac87b4086eae386ed6ebaafe9cf500481..6ad23bd3523a991b8fc9096adc272708e83c85dc 100644 (file)
@@ -639,9 +639,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
                if (g0 != panels[i].g0)
                        continue;
                if (r0 == panels[i].r0 && b0 == panels[i].b0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
-               if (r0 == panels[i].b0 && b0 == panels[i].r0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+                       fb->panel->caps = panels[i].caps;
        }
 
        return fb->panel->caps ? 0 : -EINVAL;
index 5c660c77f03b58a32c24749b7053df6608a23230..1e0a317d3dcdda5b6041ae8481006f1d1479934b 100644 (file)
@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit)
        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 
        if (rc) {
-               pr_info("%s: add_memory() failed: %i\n", __func__, rc);
-               return BP_EAGAIN;
+               pr_warn("Cannot add additional memory (%i)\n", rc);
+               return BP_ECANCELED;
        }
 
        balloon_hotplug -= credit;
index 787d17945418b19c53d408d710e2558a6bb85ad8..e53fe191738cfe8ce7c7cfe6202c2651258ff952 100644 (file)
@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
        int i, rc, readonly;
        LIST_HEAD(queue_gref);
        LIST_HEAD(queue_file);
-       struct gntalloc_gref *gref;
+       struct gntalloc_gref *gref, *next;
 
        readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
        rc = -ENOMEM;
@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
                        goto undo;
 
                /* Grant foreign access to the page. */
-               gref->gref_id = gnttab_grant_foreign_access(op->domid,
+               rc = gnttab_grant_foreign_access(op->domid,
                        pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-               if ((int)gref->gref_id < 0) {
-                       rc = gref->gref_id;
+               if (rc < 0)
                        goto undo;
-               }
-               gref_ids[i] = gref->gref_id;
+               gref_ids[i] = gref->gref_id = rc;
        }
 
        /* Add to gref lists. */
@@ -162,8 +160,8 @@ undo:
        mutex_lock(&gref_mutex);
        gref_size -= (op->count - i);
 
-       list_for_each_entry(gref, &queue_file, next_file) {
-               /* __del_gref does not remove from queue_file */
+       list_for_each_entry_safe(gref, next, &queue_file, next_file) {
+               list_del(&gref->next_file);
                __del_gref(gref);
        }
 
@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref)
 
        gref->notify.flags = 0;
 
-       if (gref->gref_id > 0) {
+       if (gref->gref_id) {
                if (gnttab_query_foreign_access(gref->gref_id))
                        return;
 
index 5f1e1f3cd18619ed2899dcc5e967ca274f690189..f8bb36f9d9cef862c6c502b5ba25b7d38476cd85 100644 (file)
@@ -103,16 +103,11 @@ static void do_suspend(void)
 
        shutting_down = SHUTDOWN_SUSPEND;
 
-#ifdef CONFIG_PREEMPT
-       /* If the kernel is preemptible, we need to freeze all the processes
-          to prevent them from being in the middle of a pagetable update
-          during suspend. */
        err = freeze_processes();
        if (err) {
                pr_err("%s: freeze failed %d\n", __func__, err);
                goto out;
        }
-#endif
 
        err = dpm_suspend_start(PMSG_FREEZE);
        if (err) {
@@ -157,10 +152,8 @@ out_resume:
        dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
 
 out_thaw:
-#ifdef CONFIG_PREEMPT
        thaw_processes();
 out:
-#endif
        shutting_down = SHUTDOWN_INVALID;
 }
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
index 43527fd78825b690503347426118e09f76017c95..56b8522d5767b5858928d4152c0ab64ca43886ca 100644 (file)
@@ -234,8 +234,17 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
            BTRFS_I(inode)->last_sub_trans <=
            BTRFS_I(inode)->last_log_commit &&
            BTRFS_I(inode)->last_sub_trans <=
-           BTRFS_I(inode)->root->last_log_commit)
-               return 1;
+           BTRFS_I(inode)->root->last_log_commit) {
+               /*
+                * After a ranged fsync we might have left some extent maps
+                * (that fall outside the fsync's range). So return false
+                * here if the list isn't empty, to make sure btrfs_log_inode()
+                * will be called and process those extent maps.
+                */
+               smp_mb();
+               if (list_empty(&BTRFS_I(inode)->extent_tree.modified_extents))
+                       return 1;
+       }
        return 0;
 }
 
index 36861b7a6757673189737024b62fcce9842bc3d0..ff1cc0399b9a206c127b4707b6dcd69d455d8231 100644 (file)
@@ -1966,7 +1966,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        btrfs_init_log_ctx(&ctx);
 
-       ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
+       ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
                ret = 1;
index 9c194bd74d6e513c075b077b62ee1754cd02e966..016c403bfe7e4241b33c6b6c3e4101ba5ea993b2 100644 (file)
@@ -778,8 +778,12 @@ retry:
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
-               if (ret)
+               if (ret) {
+                       btrfs_drop_extent_cache(inode, async_extent->start,
+                                               async_extent->start +
+                                               async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
+               }
 
                /*
                 * clear dirty, set writeback and unlock the pages.
@@ -971,14 +975,14 @@ static noinline int cow_file_range(struct inode *inode,
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
-                       goto out_reserve;
+                       goto out_drop_extent_cache;
 
                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
-                               goto out_reserve;
+                               goto out_drop_extent_cache;
                }
 
                if (disk_num_bytes < cur_alloc_size)
@@ -1006,6 +1010,8 @@ static noinline int cow_file_range(struct inode *inode,
 out:
        return ret;
 
+out_drop_extent_cache:
+       btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
 out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
@@ -4242,7 +4248,8 @@ out:
                        btrfs_abort_transaction(trans, root, ret);
        }
 error:
-       if (last_size != (u64)-1)
+       if (last_size != (u64)-1 &&
+           root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                btrfs_ordered_update_i_size(inode, last_size, NULL);
        btrfs_free_path(path);
        return err;
@@ -5627,6 +5634,17 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
        return ret;
 }
 
+static int btrfs_insert_inode_locked(struct inode *inode)
+{
+       struct btrfs_iget_args args;
+       args.location = &BTRFS_I(inode)->location;
+       args.root = BTRFS_I(inode)->root;
+
+       return insert_inode_locked4(inode,
+                  btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
+                  btrfs_find_actor, &args);
+}
+
 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct inode *dir,
@@ -5719,10 +5737,19 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                sizes[1] = name_len + sizeof(*ref);
        }
 
+       location = &BTRFS_I(inode)->location;
+       location->objectid = objectid;
+       location->offset = 0;
+       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+
+       ret = btrfs_insert_inode_locked(inode);
+       if (ret < 0)
+               goto fail;
+
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
        if (ret != 0)
-               goto fail;
+               goto fail_unlock;
 
        inode_init_owner(inode, dir, mode);
        inode_set_bytes(inode, 0);
@@ -5745,11 +5772,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
 
-       location = &BTRFS_I(inode)->location;
-       location->objectid = objectid;
-       location->offset = 0;
-       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
-
        btrfs_inherit_iflags(inode, dir);
 
        if (S_ISREG(mode)) {
@@ -5760,7 +5782,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                BTRFS_INODE_NODATASUM;
        }
 
-       btrfs_insert_inode_hash(inode);
        inode_tree_add(inode);
 
        trace_btrfs_inode_new(inode);
@@ -5775,6 +5796,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                          btrfs_ino(inode), root->root_key.objectid, ret);
 
        return inode;
+
+fail_unlock:
+       unlock_new_inode(inode);
 fail:
        if (dir && name)
                BTRFS_I(dir)->index_cnt--;
@@ -5909,28 +5933,28 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
        * if the filesystem supports xattrs by looking at the
        * ops vector.
        */
-
        inode->i_op = &btrfs_special_inode_operations;
-       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       init_special_inode(inode, inode->i_mode, rdev);
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               drop_inode = 1;
-       else {
-               init_special_inode(inode, inode->i_mode, rdev);
+               goto out_unlock_inode;
+
+       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       if (err) {
+               goto out_unlock_inode;
+       } else {
                btrfs_update_inode(trans, root, inode);
+               unlock_new_inode(inode);
                d_instantiate(dentry, inode);
        }
+
 out_unlock:
        btrfs_end_transaction(trans, root);
        btrfs_balance_delayed_items(root);
@@ -5940,6 +5964,12 @@ out_unlock:
                iput(inode);
        }
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -5974,15 +6004,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
        drop_inode_on_err = 1;
-
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err)
-               goto out_unlock;
-
-       err = btrfs_update_inode(trans, root, inode);
-       if (err)
-               goto out_unlock;
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -5991,14 +6012,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
+
+       err = btrfs_update_inode(trans, root, inode);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               goto out_unlock;
+               goto out_unlock_inode;
 
-       inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
 out_unlock:
@@ -6010,6 +6040,11 @@ out_unlock:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6117,25 +6152,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        }
 
        drop_on_err = 1;
+       /* these must be set before we unlock the inode */
+       inode->i_op = &btrfs_dir_inode_operations;
+       inode->i_fop = &btrfs_dir_file_operations;
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_fail;
-
-       inode->i_op = &btrfs_dir_inode_operations;
-       inode->i_fop = &btrfs_dir_file_operations;
+               goto out_fail_inode;
 
        btrfs_i_size_write(inode, 0);
        err = btrfs_update_inode(trans, root, inode);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
                             dentry->d_name.len, 0, index);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        d_instantiate(dentry, inode);
+       /*
+        * mkdir is special.  We're unlocking after we call d_instantiate
+        * to avoid a race with nfsd calling d_instantiate.
+        */
+       unlock_new_inode(inode);
        drop_on_err = 0;
 
 out_fail:
@@ -6145,6 +6185,10 @@ out_fail:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_fail_inode:
+       unlock_new_inode(inode);
+       goto out_fail;
 }
 
 /* helper for btfs_get_extent.  Given an existing extent in the tree,
@@ -8100,6 +8144,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 
        set_nlink(inode, 1);
        btrfs_i_size_write(inode, 0);
+       unlock_new_inode(inode);
 
        err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
        if (err)
@@ -8760,12 +8805,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -8774,23 +8813,22 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+       BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               drop_inode = 1;
-       else {
-               inode->i_mapping->a_ops = &btrfs_aops;
-               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
-       }
-       if (drop_inode)
-               goto out_unlock;
+               goto out_unlock_inode;
 
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
-               drop_inode = 1;
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        key.objectid = btrfs_ino(inode);
        key.offset = 0;
@@ -8799,9 +8837,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
-               drop_inode = 1;
                btrfs_free_path(path);
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -8825,12 +8862,15 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
-       if (err)
+       if (err) {
                drop_inode = 1;
+               goto out_unlock_inode;
+       }
+
+       unlock_new_inode(inode);
+       d_instantiate(dentry, inode);
 
 out_unlock:
-       if (!err)
-               d_instantiate(dentry, inode);
        btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -8838,6 +8878,11 @@ out_unlock:
        }
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
 }
 
 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -9021,14 +9066,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
                goto out;
        }
 
-       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
-       if (ret)
-               goto out;
-
-       ret = btrfs_update_inode(trans, root, inode);
-       if (ret)
-               goto out;
-
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
 
@@ -9036,9 +9073,16 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 
+       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
+       if (ret)
+               goto out_inode;
+
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret)
+               goto out_inode;
        ret = btrfs_orphan_add(trans, inode);
        if (ret)
-               goto out;
+               goto out_inode;
 
        /*
         * We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -9048,6 +9092,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
+       unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);
 
@@ -9057,8 +9102,12 @@ out:
                iput(inode);
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
-
        return ret;
+
+out_inode:
+       unlock_new_inode(inode);
+       goto out;
+
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
index fce6fd0e3f50c729adcf278b1b2a13bd2ede1494..8a8e29878c34283812f8d990c2432cff94bbab2a 100644 (file)
@@ -1019,8 +1019,10 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
                return false;
 
        next = defrag_lookup_extent(inode, em->start + em->len);
-       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE ||
-           (em->block_start + em->block_len == next->block_start))
+       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+               ret = false;
+       else if ((em->block_start + em->block_len == next->block_start) &&
+                (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
                ret = false;
 
        free_extent_map(next);
@@ -1055,7 +1057,6 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
        }
 
        next_mergeable = defrag_check_next_extent(inode, em);
-
        /*
         * we hit a real extent, if it is big or the next extent is not a
         * real extent, don't bother defragging it
@@ -1702,7 +1703,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
            ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
              BTRFS_SUBVOL_QGROUP_INHERIT)) {
                ret = -EOPNOTSUPP;
-               goto out;
+               goto free_args;
        }
 
        if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
@@ -1712,27 +1713,31 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
                if (vol_args->size > PAGE_CACHE_SIZE) {
                        ret = -EINVAL;
-                       goto out;
+                       goto free_args;
                }
                inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
                if (IS_ERR(inherit)) {
                        ret = PTR_ERR(inherit);
-                       goto out;
+                       goto free_args;
                }
        }
 
        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol, ptr,
                                              readonly, inherit);
+       if (ret)
+               goto free_inherit;
 
-       if (ret == 0 && ptr &&
-           copy_to_user(arg +
-                        offsetof(struct btrfs_ioctl_vol_args_v2,
-                                 transid), ptr, sizeof(*ptr)))
+       if (ptr && copy_to_user(arg +
+                               offsetof(struct btrfs_ioctl_vol_args_v2,
+                                       transid),
+                               ptr, sizeof(*ptr)))
                ret = -EFAULT;
-out:
-       kfree(vol_args);
+
+free_inherit:
        kfree(inherit);
+free_args:
+       kfree(vol_args);
        return ret;
 }
 
@@ -2652,7 +2657,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args)) {
                ret = PTR_ERR(vol_args);
-               goto out;
+               goto err_drop;
        }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
@@ -2670,6 +2675,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 
 out:
        kfree(vol_args);
+err_drop:
        mnt_drop_write_file(file);
        return ret;
 }
index 7e0e6e3029dd4e36e9fd72e68b151ac991b66c12..1d1ba083ca6ecc1497bd1c141a431a2d5fa26cd9 100644 (file)
 #define LOG_WALK_REPLAY_ALL 3
 
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only);
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end);
 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid);
@@ -3858,8 +3860,10 @@ process:
  * This handles both files and directories.
  */
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only)
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end)
 {
        struct btrfs_path *path;
        struct btrfs_path *dst_path;
@@ -3876,6 +3880,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        int ins_nr;
        bool fast_search = false;
        u64 ino = btrfs_ino(inode);
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4049,13 +4054,35 @@ log_extents:
                        goto out_unlock;
                }
        } else if (inode_only == LOG_INODE_ALL) {
-               struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
                struct extent_map *em, *n;
 
-               write_lock(&tree->lock);
-               list_for_each_entry_safe(em, n, &tree->modified_extents, list)
-                       list_del_init(&em->list);
-               write_unlock(&tree->lock);
+               write_lock(&em_tree->lock);
+               /*
+                * We can't just remove every em if we're called for a ranged
+                * fsync - that is, one that doesn't cover the whole possible
+                * file range (0 to LLONG_MAX). This is because we can have
+                * em's that fall outside the range we're logging and therefore
+                * their ordered operations haven't completed yet
+                * (btrfs_finish_ordered_io() not invoked yet). This means we
+                * didn't get their respective file extent item in the fs/subvol
+                * tree yet, and need to let the next fast fsync (one which
+                * consults the list of modified extent maps) find the em so
+                * that it logs a matching file extent item and waits for the
+                * respective ordered operation to complete (if it's still
+                * running).
+                *
+                * Removing every em outside the range we're logging would make
+                * the next fast fsync not log their matching file extent items,
+                * therefore making us lose data after a log replay.
+                */
+               list_for_each_entry_safe(em, n, &em_tree->modified_extents,
+                                        list) {
+                       const u64 mod_end = em->mod_start + em->mod_len - 1;
+
+                       if (em->mod_start >= start && mod_end <= end)
+                               list_del_init(&em->list);
+               }
+               write_unlock(&em_tree->lock);
        }
 
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
@@ -4065,6 +4092,7 @@ log_extents:
                        goto out_unlock;
                }
        }
+
        BTRFS_I(inode)->logged_trans = trans->transid;
        BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
 out_unlock:
@@ -4161,7 +4189,10 @@ out:
  */
 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root, struct inode *inode,
-                                 struct dentry *parent, int exists_only,
+                                 struct dentry *parent,
+                                 const loff_t start,
+                                 const loff_t end,
+                                 int exists_only,
                                  struct btrfs_log_ctx *ctx)
 {
        int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
@@ -4207,7 +4238,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        if (ret)
                goto end_no_trans;
 
-       ret = btrfs_log_inode(trans, root, inode, inode_only);
+       ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
        if (ret)
                goto end_trans;
 
@@ -4235,7 +4266,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 
                if (BTRFS_I(inode)->generation >
                    root->fs_info->last_trans_committed) {
-                       ret = btrfs_log_inode(trans, root, inode, inode_only);
+                       ret = btrfs_log_inode(trans, root, inode, inode_only,
+                                             0, LLONG_MAX);
                        if (ret)
                                goto end_trans;
                }
@@ -4269,13 +4301,15 @@ end_no_trans:
  */
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx)
 {
        struct dentry *parent = dget_parent(dentry);
        int ret;
 
        ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
-                                    0, ctx);
+                                    start, end, 0, ctx);
        dput(parent);
 
        return ret;
@@ -4512,6 +4546,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                    root->fs_info->last_trans_committed))
                return 0;
 
-       return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
+       return btrfs_log_inode_parent(trans, root, inode, parent, 0,
+                                     LLONG_MAX, 1, NULL);
 }
 
index 7f5b41bd5373810a04159d8c28facbd289c5c03c..e2e798ae7cd7c6c989a6633fe7dae682ab4a5148 100644 (file)
@@ -59,6 +59,8 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_recover_log_trees(struct btrfs_root *tree_root);
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx);
 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
index 340a92d08e84d9e5268a18fe5f3112465fa47888..2c2d6d1d8eee929fc910a82cd17fbec79803cbb2 100644 (file)
@@ -529,12 +529,12 @@ static noinline int device_list_add(const char *path,
                 */
 
                /*
-                * As of now don't allow update to btrfs_fs_device through
-                * the btrfs dev scan cli, after FS has been mounted.
+                * For now, we do allow update to btrfs_fs_device through the
+                * btrfs dev scan cli after FS has been mounted.  We're still
+                * tracking a problem where systems fail mount by subvolume id
+                * when we reject replacement on a mounted FS.
                 */
-               if (fs_devices->opened) {
-                       return -EBUSY;
-               } else {
+               if (!fs_devices->opened && found_transid < device->generation) {
                        /*
                         * That is if the FS is _not_ mounted and if you
                         * are here, that means there is more than one
@@ -542,8 +542,7 @@ static noinline int device_list_add(const char *path,
                         * with larger generation number or the last-in if
                         * generation are equal.
                         */
-                       if (found_transid < device->generation)
-                               return -EEXIST;
+                       return -EEXIST;
                }
 
                name = rcu_string_strdup(path, GFP_NOFS);
index 8f05111bbb8bd36391ae01184ee5dcc9a6cfb40e..3588a80854b27acb18d185d437662d03668eddae 100644 (file)
@@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
-                                               index << sizebits, size);
+                                               (sector_t)index << sizebits,
+                                               size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
@@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       end_block = init_page_buffers(page, bdev, index << sizebits, size);
+       end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+                       size);
        spin_unlock(&inode->i_mapping->private_lock);
 done:
        ret = (block < end_block) ? 1 : -ENXIO;
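
Aside (not part of the patch): the (sector_t) casts added above keep the shift from being evaluated in 32-bit arithmetic (pgoff_t is an unsigned long, so 32 bits on 32-bit kernels, while sector_t is 64 bits). A minimal userspace sketch, with uint32_t/uint64_t as hypothetical stand-ins for pgoff_t and sector_t and made-up values, shows the block number being truncated when the shift happens before widening:

/*
 * Illustrative userspace sketch only: why "index << sizebits" must be
 * widened before the shift.  uint32_t stands in for a 32-bit pgoff_t,
 * uint64_t for sector_t; the values are made up for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t index = UINT32_C(0x20000000); /* page index ~2 TiB into the device (4 KiB pages) */
	unsigned int sizebits = 3;             /* 4 KiB pages, 512-byte blocks */

	uint64_t truncated = index << sizebits;           /* shift done in 32 bits, high bit lost */
	uint64_t widened   = (uint64_t)index << sizebits; /* widen first, as the patch does */

	printf("truncated block = %" PRIu64 "\n", truncated); /* prints 0 */
	printf("widened block   = %" PRIu64 "\n", widened);   /* prints 4294967296 */
	return 0;
}
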
index d749731dc0ee89d5683d5be60192b66a1d02ea82..fbb08e97438d36eb0bc783386bf68d1521fb9a81 100644 (file)
@@ -50,18 +50,18 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
               cache->brun_percent  < 100);
 
        if (*args) {
-               pr_err("'bind' command doesn't take an argument");
+               pr_err("'bind' command doesn't take an argument\n");
                return -EINVAL;
        }
 
        if (!cache->rootdirname) {
-               pr_err("No cache directory specified");
+               pr_err("No cache directory specified\n");
                return -EINVAL;
        }
 
        /* don't permit already bound caches to be re-bound */
        if (test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("Cache already bound");
+               pr_err("Cache already bound\n");
                return -EBUSY;
        }
 
@@ -248,7 +248,7 @@ error_open_root:
        kmem_cache_free(cachefiles_object_jar, fsdef);
 error_root_object:
        cachefiles_end_secure(cache, saved_cred);
-       pr_err("Failed to register: %d", ret);
+       pr_err("Failed to register: %d\n", ret);
        return ret;
 }
 
index b078d3081d6cd287a07c4c36cb5af5dde01c2122..ce1b115dcc28bc2968009e5e8182004da99cf29e 100644 (file)
@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file,
 static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
                                         char *args)
 {
-       pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
+       pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
 
        return -EINVAL;
 }
@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty directory specified");
+               pr_err("Empty directory specified\n");
                return -EINVAL;
        }
 
        if (cache->rootdirname) {
-               pr_err("Second cache directory specified");
+               pr_err("Second cache directory specified\n");
                return -EEXIST;
        }
 
@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty security context specified");
+               pr_err("Empty security context specified\n");
                return -EINVAL;
        }
 
        if (cache->secctx) {
-               pr_err("Second security context specified");
+               pr_err("Second security context specified\n");
                return -EINVAL;
        }
 
@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty tag specified");
+               pr_err("Empty tag specified\n");
                return -EINVAL;
        }
 
@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
                goto inval;
 
        if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("cull applied to unready cache");
+               pr_err("cull applied to unready cache\n");
                return -EIO;
        }
 
        if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-               pr_err("cull applied to dead cache");
+               pr_err("cull applied to dead cache\n");
                return -EIO;
        }
 
@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
 
 notdir:
        path_put(&path);
-       pr_err("cull command requires dirfd to be a directory");
+       pr_err("cull command requires dirfd to be a directory\n");
        return -ENOTDIR;
 
 inval:
-       pr_err("cull command requires dirfd and filename");
+       pr_err("cull command requires dirfd and filename\n");
        return -EINVAL;
 }
 
@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
        return 0;
 
 inval:
-       pr_err("debug command requires mask");
+       pr_err("debug command requires mask\n");
        return -EINVAL;
 }
 
@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
                goto inval;
 
        if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("inuse applied to unready cache");
+               pr_err("inuse applied to unready cache\n");
                return -EIO;
        }
 
        if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-               pr_err("inuse applied to dead cache");
+               pr_err("inuse applied to dead cache\n");
                return -EIO;
        }
 
@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
 
 notdir:
        path_put(&path);
-       pr_err("inuse command requires dirfd to be a directory");
+       pr_err("inuse command requires dirfd to be a directory\n");
        return -ENOTDIR;
 
 inval:
-       pr_err("inuse command requires dirfd and filename");
+       pr_err("inuse command requires dirfd and filename\n");
        return -EINVAL;
 }
 
index 3d50998abf57053215595209cea8d70b10084232..8c52472d2efa4ec745821e52d9deaa14bd440515 100644 (file)
@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
 
 #define cachefiles_io_error(___cache, FMT, ...)                \
 do {                                                   \
-       pr_err("I/O Error: " FMT, ##__VA_ARGS__);       \
+       pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);   \
        fscache_io_error(&(___cache)->cache);           \
        set_bit(CACHEFILES_DEAD, &(___cache)->flags);   \
 } while (0)
index 180edfb45f661fc2a4098f3a099471cc4e0ea93d..711f13d8c2deac9df76f3fca0bc2522b6096bf53 100644 (file)
@@ -84,7 +84,7 @@ error_proc:
 error_object_jar:
        misc_deregister(&cachefiles_dev);
 error_dev:
-       pr_err("failed to register: %d", ret);
+       pr_err("failed to register: %d\n", ret);
        return ret;
 }
 
index 5bf2b41e66d392d0b013a140648c1768bc62b5fd..dad7d9542a24bc287e94567fd7e2c1f4c329ae05 100644 (file)
@@ -543,7 +543,7 @@ lookup_again:
                               next, next->d_inode, next->d_inode->i_ino);
 
                } else if (!S_ISDIR(next->d_inode->i_mode)) {
-                       pr_err("inode %lu is not a directory",
+                       pr_err("inode %lu is not a directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
                        goto error;
@@ -574,7 +574,7 @@ lookup_again:
                } else if (!S_ISDIR(next->d_inode->i_mode) &&
                           !S_ISREG(next->d_inode->i_mode)
                           ) {
-                       pr_err("inode %lu is not a file or directory",
+                       pr_err("inode %lu is not a file or directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
                        goto error;
@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        ASSERT(subdir->d_inode);
 
        if (!S_ISDIR(subdir->d_inode->i_mode)) {
-               pr_err("%s is not a directory", dirname);
+               pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
        }
@@ -779,7 +779,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
            !subdir->d_inode->i_op->lookup ||
            !subdir->d_inode->i_op->mkdir ||
            !subdir->d_inode->i_op->create ||
-           !subdir->d_inode->i_op->rename ||
+           (!subdir->d_inode->i_op->rename &&
+            !subdir->d_inode->i_op->rename2) ||
            !subdir->d_inode->i_op->rmdir ||
            !subdir->d_inode->i_op->unlink)
                goto check_error;
@@ -795,13 +796,13 @@ check_error:
 mkdir_error:
        mutex_unlock(&dir->d_inode->i_mutex);
        dput(subdir);
-       pr_err("mkdir %s failed with error %d", dirname, ret);
+       pr_err("mkdir %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);
 
 lookup_error:
        mutex_unlock(&dir->d_inode->i_mutex);
        ret = PTR_ERR(subdir);
-       pr_err("Lookup %s failed with error %d", dirname, ret);
+       pr_err("Lookup %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);
 
 nomem_d_alloc:
@@ -891,7 +892,7 @@ lookup_error:
        if (ret == -EIO) {
                cachefiles_io_error(cache, "Lookup failed");
        } else if (ret != -ENOMEM) {
-               pr_err("Internal error: %d", ret);
+               pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }
 
@@ -950,7 +951,7 @@ error:
        }
 
        if (ret != -ENOMEM) {
-               pr_err("Internal error: %d", ret);
+               pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }
 
index 4b1fb5ca65b8058c4713e92dda3b1772cdeb6cfe..25e745b8eb1b5fe127be7fb20998444432ca9263 100644 (file)
@@ -151,7 +151,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
-       struct pagevec pagevec;
        int error, max;
 
        op = container_of(_op, struct fscache_retrieval, op);
@@ -160,8 +159,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
        _enter("{ino=%lu}", object->backer->d_inode->i_ino);
 
-       pagevec_init(&pagevec, 0);
-
        max = 8;
        spin_lock_irq(&object->work_lock);
 
@@ -396,7 +393,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 {
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
-       struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
@@ -427,8 +423,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;
 
-       pagevec_init(&pagevec, 0);
-
        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
index 1ad51ffbb275150a9f9846e019421eb8b109d152..acbc1f094fb1a418ed489538c85122c609052bd0 100644 (file)
@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
        }
 
        if (ret != -EEXIST) {
-               pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
+               pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
                       dentry->d_name.len, dentry->d_name.len,
                       dentry->d_name.name, dentry->d_inode->i_ino,
                       -ret);
@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
                if (ret == -ERANGE)
                        goto bad_type_length;
 
-               pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
+               pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
                       dentry->d_name.len, dentry->d_name.len,
                       dentry->d_name.name, dentry->d_inode->i_ino,
                       -ret);
@@ -85,14 +85,14 @@ error:
        return ret;
 
 bad_type_length:
-       pr_err("Cache object %lu type xattr length incorrect",
+       pr_err("Cache object %lu type xattr length incorrect\n",
               dentry->d_inode->i_ino);
        ret = -EIO;
        goto error;
 
 bad_type:
        xtype[2] = 0;
-       pr_err("Cache object %*.*s [%lu] type %s not %s",
+       pr_err("Cache object %*.*s [%lu] type %s not %s\n",
               dentry->d_name.len, dentry->d_name.len,
               dentry->d_name.name, dentry->d_inode->i_ino,
               xtype, type);
@@ -293,7 +293,7 @@ error:
        return ret;
 
 bad_type_length:
-       pr_err("Cache object %lu xattr length incorrect",
+       pr_err("Cache object %lu xattr length incorrect\n",
               dentry->d_inode->i_ino);
        ret = -EIO;
        goto error;
index 603f18a65c121e5586f903baff101a8637bdb852..a2172f3f69e318915f092885da0b4d49755d33f1 100644 (file)
@@ -22,6 +22,11 @@ config CIFS
          support for OS/2 and Windows ME and similar servers is provided as
          well.
 
+         The module also provides optional support for the follow-on
+         protocols for CIFS including SMB3, which enables
+         useful performance and security features (see the description
+         of CONFIG_CIFS_SMB2).
+
          The cifs module provides an advanced network file system
          client for mounting to CIFS compliant servers.  It includes
          support for DFS (hierarchical name space), secure per-user
@@ -121,7 +126,8 @@ config CIFS_ACL
          depends on CIFS_XATTR && KEYS
          help
            Allows fetching CIFS/NTFS ACL from the server.  The DACL blob
-           is handed over to the application/caller.
+           is handed over to the application/caller.  See the man
+           page for getcifsacl for more information.
 
 config CIFS_DEBUG
        bool "Enable CIFS debugging routines"
@@ -162,7 +168,7 @@ config CIFS_NFSD_EXPORT
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
 
 config CIFS_SMB2
-       bool "SMB2 network file system support"
+       bool "SMB2 and SMB3 network file system support"
        depends on CIFS && INET
        select NLS
        select KEYS
@@ -170,16 +176,21 @@ config CIFS_SMB2
        select DNS_RESOLVER
 
        help
-         This enables experimental support for the SMB2 (Server Message Block
-         version 2) protocol. The SMB2 protocol is the successor to the
-         popular CIFS and SMB network file sharing protocols. SMB2 is the
-         native file sharing mechanism for recent versions of Windows
-         operating systems (since Vista).  SMB2 enablement will eventually
-         allow users better performance, security and features, than would be
-         possible with cifs. Note that smb2 mount options also are simpler
-         (compared to cifs) due to protocol improvements.
-
-         Unless you are a developer or tester, say N.
+         This enables support for the Server Message Block version 2
+         family of protocols, including SMB3.  SMB3 support is
+         enabled on mount by specifying "vers=3.0" in the mount
+         options. These protocols are the successors to the popular
+         CIFS and SMB network file sharing protocols. SMB3 is the
+         native file sharing mechanism for the more recent
+         versions of Windows (Windows 8 and Windows 2012 and
+         later) and Samba server and many others support SMB3 well.
+         In general SMB3 enables better performance, security
+         and features than would be possible with CIFS. (Note that
+         when mounting to Samba, due to the CIFS POSIX extensions,
+         CIFS mounts can provide slightly better POSIX compatibility
+         than SMB3 mounts do though). Note that SMB2/SMB3 mount
+         options are also slightly simpler (compared to CIFS) due
+         to protocol improvements.
 
 config CIFS_FSCACHE
          bool "Provide CIFS client caching support"
index b0fafa499505218fb0cecac43cfb9db4e9a77f26..002e0c17393903f62fe4e382acb23df44a9ccec7 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.04"
+#define CIFS_VERSION   "2.05"
 #endif                         /* _CIFSFS_H */
index dfc731b02aa9b3daca3a15d95346bcd82cc2b3d5..25b8392bfdd2a7fd9864af5008e809dc41e3a7df 100644 (file)
 #define SERVER_NAME_LENGTH 40
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
 
-/* used to define string lengths for reversing unicode strings */
-/*         (256+1)*2 = 514                                     */
-/*           (max path length + 1 for null) * 2 for unicode    */
-#define MAX_NAME 514
-
 /* SMB echo "timeout" -- FIXME: tunable? */
 #define SMB_ECHO_INTERVAL (60 * HZ)
 
index 03ed8a09581ca0104468d1207ff0cadefd0f9390..36ca2045009bf342ef507c132921484c7eaf7ded 100644 (file)
@@ -1600,6 +1600,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        tmp_end++;
                        if (!(tmp_end < end && tmp_end[1] == delim)) {
                                /* No it is not. Set the password to NULL */
+                               kfree(vol->password);
                                vol->password = NULL;
                                break;
                        }
@@ -1637,6 +1638,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                        options = end;
                        }
 
+                       kfree(vol->password);
                        /* Now build new password string */
                        temp_len = strlen(value);
                        vol->password = kzalloc(temp_len+1, GFP_KERNEL);
index 3db0c5fd9a1109629764cf5f2b759dd9ddd8cd8a..6cbd9c688cfe818a773a2a7f42c64ce59d693de1 100644 (file)
@@ -497,6 +497,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                goto out;
        }
 
+       if (file->f_flags & O_DIRECT &&
+           CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+               }
+
        file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (file_info == NULL) {
                if (server->ops->close)
index d5fec92e036068b98bf2e3426e3f8d159b70e048..5f29354b072abfe2a44a4303edc92854c1959082 100644 (file)
@@ -467,6 +467,14 @@ int cifs_open(struct inode *inode, struct file *file)
        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);
 
+       if (file->f_flags & O_DIRECT &&
+           cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+       }
+
        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
@@ -3560,15 +3568,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                                lru_cache_add_file(page);
                                unlock_page(page);
                                page_cache_release(page);
-                               if (rc == -EAGAIN)
-                                       list_add_tail(&page->lru, &tmplist);
                        }
+                       /* Fall back to readpage in error/reconnect cases */
                        kref_put(&rdata->refcount, cifs_readdata_release);
-                       if (rc == -EAGAIN) {
-                               /* Re-add pages to the page_list and retry */
-                               list_splice(&tmplist, page_list);
-                               continue;
-                       }
                        break;
                }
 
index 949ec909ec9ab94747ed332cc96cdcdbc29fd47b..7899a40465b303db85e1903e9fec1fe57124a261 100644 (file)
@@ -1720,7 +1720,10 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
 unlink_target:
        /* Try unlinking the target dentry if it's not negative */
        if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
-               tmprc = cifs_unlink(target_dir, target_dentry);
+               if (d_is_dir(target_dentry))
+                       tmprc = cifs_rmdir(target_dir, target_dentry);
+               else
+                       tmprc = cifs_unlink(target_dir, target_dentry);
                if (tmprc)
                        goto cifs_rename_exit;
                rc = cifs_do_rename(xid, source_dentry, from_name,
index 68559fd557fbbc845e71c71498f314887494c7fc..5657416d3483fd9dea39601e61e5c64e08ec3894 100644 (file)
@@ -213,8 +213,12 @@ create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto out;
 
-       rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb,
-                                       fromName, buf, &bytes_written);
+       if (tcon->ses->server->ops->create_mf_symlink)
+               rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
+                                       cifs_sb, fromName, buf, &bytes_written);
+       else
+               rc = -EOPNOTSUPP;
+
        if (rc)
                goto out;
 
@@ -339,9 +343,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                return rc;
 
-       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE))
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+               rc = -ENOENT;
                /* it's not a symlink */
                goto out;
+       }
 
        io_parms.netfid = fid.netfid;
        io_parms.pid = current->tgid;
index 6834b9c3bec12505fe1adc55e17f911fef018eac..b333ff60781d295809d8fa8f23366f9bcf8d9285 100644 (file)
@@ -925,11 +925,23 @@ cifs_NTtimeToUnix(__le64 ntutc)
        /* BB what about the timezone? BB */
 
        /* Subtract the NTFS time offset, then convert to 1s intervals. */
-       u64 t;
+       s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+
+       /*
+        * Unfortunately we cannot use normal 64-bit division on a 32-bit arch;
+        * the alternative, do_div(), does not work with negative numbers, so we
+        * have to special-case them.
+        */
+       if (t < 0) {
+               t = -t;
+               ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+               ts.tv_nsec = -ts.tv_nsec;
+               ts.tv_sec = -t;
+       } else {
+               ts.tv_nsec = (long)do_div(t, 10000000) * 100;
+               ts.tv_sec = t;
+       }
 
-       t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
-       ts.tv_nsec = do_div(t, 10000000) * 100;
-       ts.tv_sec = t;
        return ts;
 }
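
The comment in cifs_NTtimeToUnix() above explains why do_div() plus a sign special case is needed on 32-bit kernels. For context only, the following hedged userspace sketch shows the same arithmetic on a host with native 64-bit division; the epoch offset constant is written out here as an assumption (the usual 1601-to-1970 difference in 100ns units) rather than taken from the kernel header.

/* Illustrative only -- NT time (100ns ticks since 1601) to Unix time. */
#include <stdio.h>
#include <stdint.h>

/* Assumed value: seconds between 1601-01-01 and 1970-01-01, in 100ns units. */
#define NT_EPOCH_OFFSET ((int64_t)11644473600LL * 10000000LL)

int main(void)
{
	int64_t ntutc = 0;                      /* 1601-01-01, before the Unix epoch */
	int64_t t = ntutc - NT_EPOCH_OFFSET;    /* negative for pre-1970 timestamps */
	long long sec  = t / 10000000;          /* native 64-bit division */
	long long nsec = (t % 10000000) * 100;  /* remainder, scaled back to ns */

	printf("tv_sec=%lld tv_nsec=%lld\n", sec, nsec);
	return 0;
}
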
 
index 798c80a41c886b3fba7e66f8264066902d35b8cb..b334a89d6a66eb2151973d6405083db3c254710b 100644 (file)
@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
-                       if (server->ops->close)
-                               server->ops->close(xid, tcon, &cfile->fid);
+                       if (server->ops->close_dir)
+                               server->ops->close_dir(xid, tcon, &cfile->fid);
                } else
                        spin_unlock(&cifs_file_list_lock);
                if (cfile->srch_inf.ntwrk_buf_start) {
index 39ee32688eac93abb107eb07e5f5566237602ba8..57db63ff88da282cca615c998e0e495ccc58ee15 100644 (file)
@@ -243,10 +243,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
        kfree(ses->serverOS);
 
        ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
-       if (ses->serverOS)
+       if (ses->serverOS) {
                strncpy(ses->serverOS, bcc_ptr, len);
-       if (strncmp(ses->serverOS, "OS/2", 4) == 0)
-               cifs_dbg(FYI, "OS/2 server\n");
+               if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+                       cifs_dbg(FYI, "OS/2 server\n");
+       }
 
        bcc_ptr += len + 1;
        bleft -= len + 1;
@@ -744,14 +745,6 @@ out:
        sess_free_buffer(sess_data);
 }
 
-#else
-
-static void
-sess_auth_lanman(struct sess_data *sess_data)
-{
-       sess_data->result = -EOPNOTSUPP;
-       sess_data->func = NULL;
-}
 #endif
 
 static void
@@ -1102,15 +1095,6 @@ out:
        ses->auth_key.response = NULL;
 }
 
-#else
-
-static void
-sess_auth_kerberos(struct sess_data *sess_data)
-{
-       cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
-       sess_data->result = -ENOSYS;
-       sess_data->func = NULL;
-}
 #endif /* ! CONFIG_CIFS_UPCALL */
 
 /*
index 1a6df4b03f67cae97688d7a9578d7317a88419bf..52131d8cb4d5397c6c75f0290c0d226d76e6d67c 100644 (file)
@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
                tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
                if (tmprc == -EOPNOTSUPP)
                        *symlink = true;
-               else
+               else if (tmprc == 0)
                        CIFSSMBClose(xid, tcon, fid.netfid);
        }
 
index 3f17b455083141c572fac5d88e95e56fbe54f89a..45992944e23859b1706fed3d0430ea0cee445a10 100644 (file)
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
        }
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL) {
                rc = -ENOMEM;
index 0150182a44946dcebdb9aae71130f7c8a479eab9..899bbc86f73e1416aafcd00708052c22bd87f52e 100644 (file)
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        *adjust_tz = false;
        *symlink = false;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
index af59d03db49280488a33abe5ec372599aa8ad4ed..8257a5a97cc0349e9f7231f02ab36e624de2c02a 100644 (file)
@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
        "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
        {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
+       {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
+       "STATUS_REPARSE_NOT_HANDLED"},
        {STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
        "STATUS_DEVICE_REQUIRES_CLEANING"},
        {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
index 5a48aa290dfe83fdab6f9af322aadcdd0ebe3b3b..f522193b7184facd4926cfa3447b7fcbf5268cce 100644 (file)
@@ -389,7 +389,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_file_all_info *smb2_data;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
@@ -1035,7 +1035,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                if (keep_size == false)
                        return -EOPNOTSUPP;
 
-       /* 
+       /*
         * Must check if file sparse since fallocate -z (zero range) assumes
         * non-sparse allocation
         */
index fa0dd044213b9dc7dab5ec26d37c469627e9ca49..74b3a6684383c9bfd08f481c8575932cefa06766 100644 (file)
@@ -530,7 +530,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        struct smb2_sess_setup_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
        struct TCP_Server_Info *server = ses->server;
        u16 blob_length = 0;
@@ -1403,8 +1403,7 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_close_rsp *)iov[0].iov_base;
 
        if (rc != 0) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+               cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
                goto close_exit;
        }
 
@@ -1533,7 +1532,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
 {
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_ALL_INFORMATION,
-                         sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+                         sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                          sizeof(struct smb2_file_all_info), data);
 }
 
index d30ce699ae4b6ea4ac3ad1b8ec955fa271bdcb16..cb25a1a5e3073748cbef154b6d3575da3f55982d 100644 (file)
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
                                        unsigned int hash)
 {
        hash += (unsigned long) parent / L1_CACHE_BYTES;
-       hash = hash + (hash >> d_hash_shift);
-       return dentry_hashtable + (hash & d_hash_mask);
+       return dentry_hashtable + hash_32(hash, d_hash_shift);
 }
 
 /* Statistics gathering. */
@@ -2373,7 +2372,8 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
 }
 EXPORT_SYMBOL(dentry_update_name_case);
 
-static void switch_names(struct dentry *dentry, struct dentry *target)
+static void switch_names(struct dentry *dentry, struct dentry *target,
+                        bool exchange)
 {
        if (dname_external(target)) {
                if (dname_external(dentry)) {
@@ -2407,13 +2407,19 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
                         */
                        unsigned int i;
                        BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
+                       if (!exchange) {
+                               memcpy(dentry->d_iname, target->d_name.name,
+                                               target->d_name.len + 1);
+                               dentry->d_name.hash_len = target->d_name.hash_len;
+                               return;
+                       }
                        for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
                                swap(((long *) &dentry->d_iname)[i],
                                     ((long *) &target->d_iname)[i]);
                        }
                }
        }
-       swap(dentry->d_name.len, target->d_name.len);
+       swap(dentry->d_name.hash_len, target->d_name.hash_len);
 }
 
 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
@@ -2443,25 +2449,29 @@ static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
        }
 }
 
-static void dentry_unlock_parents_for_move(struct dentry *dentry,
-                                       struct dentry *target)
+static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
 {
        if (target->d_parent != dentry->d_parent)
                spin_unlock(&dentry->d_parent->d_lock);
        if (target->d_parent != target)
                spin_unlock(&target->d_parent->d_lock);
+       spin_unlock(&target->d_lock);
+       spin_unlock(&dentry->d_lock);
 }
 
 /*
  * When switching names, the actual string doesn't strictly have to
  * be preserved in the target - because we're dropping the target
  * anyway. As such, we can just do a simple memcpy() to copy over
- * the new name before we switch.
- *
- * Note that we have to be a lot more careful about getting the hash
- * switched - we have to switch the hash value properly even if it
- * then no longer matches the actual (corrupted) string of the target.
- * The hash value has to match the hash queue that the dentry is on..
+ * the new name before we switch, unless we are going to rehash
+ * it.  Note that if we *do* unhash the target, we are not allowed
+ * to rehash it without giving it a new name/hash key - whether
+ * we swap or overwrite the names here, the resulting name won't
+ * match reality in the filesystem; it's only there for d_path()
+ * purposes.  Note that all of this is happening under rename_lock,
+ * so any hash lookup seeing it in the middle of manipulations will
+ * be discarded anyway.  So we do not care what happens to the hash
+ * key in that case.
  */
 /*
  * __d_move - move a dentry
@@ -2507,36 +2517,30 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
                           d_hash(dentry->d_parent, dentry->d_name.hash));
        }
 
-       list_del(&dentry->d_u.d_child);
-       list_del(&target->d_u.d_child);
-
        /* Switch the names.. */
-       switch_names(dentry, target);
-       swap(dentry->d_name.hash, target->d_name.hash);
+       switch_names(dentry, target, exchange);
 
-       /* ... and switch the parents */
+       /* ... and switch them in the tree */
        if (IS_ROOT(dentry)) {
+               /* splicing a tree */
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
-               INIT_LIST_HEAD(&target->d_u.d_child);
+               list_del_init(&target->d_u.d_child);
+               list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
        } else {
+               /* swapping two dentries */
                swap(dentry->d_parent, target->d_parent);
-
-               /* And add them back to the (new) parent lists */
-               list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
+               list_move(&target->d_u.d_child, &target->d_parent->d_subdirs);
+               list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+               if (exchange)
+                       fsnotify_d_move(target);
+               fsnotify_d_move(dentry);
        }
 
-       list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
-
        write_seqcount_end(&target->d_seq);
        write_seqcount_end(&dentry->d_seq);
 
-       dentry_unlock_parents_for_move(dentry, target);
-       if (exchange)
-               fsnotify_d_move(target);
-       spin_unlock(&target->d_lock);
-       fsnotify_d_move(dentry);
-       spin_unlock(&dentry->d_lock);
+       dentry_unlock_for_move(dentry, target);
 }
 
 /*
@@ -2634,39 +2638,6 @@ out_err:
        return ret;
 }
 
-/*
- * Prepare an anonymous dentry for life in the superblock's dentry tree as a
- * named dentry in place of the dentry to be replaced.
- * returns with anon->d_lock held!
- */
-static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
-{
-       struct dentry *dparent;
-
-       dentry_lock_for_move(anon, dentry);
-
-       write_seqcount_begin(&dentry->d_seq);
-       write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
-
-       dparent = dentry->d_parent;
-
-       switch_names(dentry, anon);
-       swap(dentry->d_name.hash, anon->d_name.hash);
-
-       dentry->d_parent = dentry;
-       list_del_init(&dentry->d_u.d_child);
-       anon->d_parent = dparent;
-       list_move(&anon->d_u.d_child, &dparent->d_subdirs);
-
-       write_seqcount_end(&dentry->d_seq);
-       write_seqcount_end(&anon->d_seq);
-
-       dentry_unlock_parents_for_move(anon, dentry);
-       spin_unlock(&dentry->d_lock);
-
-       /* anon->d_lock still locked, returns locked */
-}
-
 /**
  * d_splice_alias - splice a disconnected dentry into the tree if one exists
  * @inode:  the inode which may have a disconnected dentry
@@ -2712,11 +2683,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                                return ERR_PTR(-EIO);
                        }
                        write_seqlock(&rename_lock);
-                       __d_materialise_dentry(dentry, new);
+                       __d_move(new, dentry, false);
                        write_sequnlock(&rename_lock);
-                       __d_drop(new);
-                       _d_rehash(new);
-                       spin_unlock(&new->d_lock);
                        spin_unlock(&inode->i_lock);
                        security_d_instantiate(new, inode);
                        iput(inode);
@@ -2776,9 +2744,8 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                        } else if (IS_ROOT(alias)) {
                                /* Is this an anonymous mountpoint that we
                                 * could splice into our tree? */
-                               __d_materialise_dentry(dentry, alias);
+                               __d_move(alias, dentry, false);
                                write_sequnlock(&rename_lock);
-                               __d_drop(alias);
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
@@ -2804,13 +2771,9 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
        actual = __d_instantiate_unique(dentry, inode);
        if (!actual)
                actual = dentry;
-       else
-               BUG_ON(!d_unhashed(actual));
 
-       spin_lock(&actual->d_lock);
+       d_rehash(actual);
 found:
-       _d_rehash(actual);
-       spin_unlock(&actual->d_lock);
        spin_unlock(&inode->i_lock);
 out_nolock:
        if (actual == dentry) {
index c3116404ab49a29c3b11fb53c175132f715750a5..e181b6b2e297fb5d3bd03a07efe382f0dd204972 100644 (file)
@@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 {
        ssize_t ret;
 
-       ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES,
+       ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
                                &sdio->from);
 
        if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
index b10b48c2a7afaf859f2b508d5bca784abfe870ac..7bcfff900f058cd9d98278c1fa13ed2b0202eccc 100644 (file)
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       ep_take_care_of_epollwakeup(&epds);
+       if (ep_op_has_event(op))
+               ep_take_care_of_epollwakeup(&epds);
 
        /*
         * We have to check that the file structure underneath the file descriptor
index 90a3cdca3f88b8d30adffe6b6dba52d613b1c9c2..603e4ebbd0ac1a8eb33f27f0e06cf302d3ce3352 100644 (file)
@@ -3240,6 +3240,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
        if (new.bh) {
@@ -3386,6 +3387,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
 
index bb0e80f03e2eb7291c91b9808f96d5469edfd903..1e43b905ff9854d7a6f45eab3da091b5867a0188 100644 (file)
@@ -575,6 +575,7 @@ handle_bb:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -603,6 +604,7 @@ handle_ib:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
 
index d3b4539f16515451cdd691fd853927d00fa3f8a8..da032daf0e0d7562f40e5a050685829af68869ac 100644 (file)
@@ -982,6 +982,7 @@ nomem:
 submit_op_failed:
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&cookie->lock);
+       fscache_unuse_cookie(object);
        kfree(op);
        _leave(" [EIO]");
        return transit_to(KILL_OBJECT);
index 85332b9d19d1613322168bc8a1c9ab6f3263e183..de33b3fccca650da99ee850597d5aef437b3a6fe 100644 (file)
@@ -43,6 +43,19 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 }
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
+/*
+ * Wait for a page to finish being written to the cache.  Put a timeout here
+ * since we might be called recursively via the parent fs.
+ */
+static
+bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
+
+       return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
+                                 HZ);
+}
+
 /*
  * decide whether a page can be released, possibly by cancelling a store to it
  * - we're allowed to sleep if __GFP_WAIT is flagged
@@ -115,7 +128,10 @@ page_busy:
        }
 
        fscache_stat(&fscache_n_store_vmscan_wait);
-       __fscache_wait_on_page_write(cookie, page);
+       if (!release_page_wait_timeout(cookie, page))
+               _debug("fscache writeout timeout page: %p{%lx}",
+                       page, page->index);
+
        gfp &= ~__GFP_WAIT;
        goto try_again;
 }
@@ -182,7 +198,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
-       bool wake_cookie;
+       bool wake_cookie = false;
 
        _enter("%p", cookie);
 
@@ -212,15 +228,16 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 
        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
-               goto nobufs;
+               goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;
 
-nobufs:
+nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
+nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
index 912061ac4baf9e6eeca15ea82394052e2583a809..caa8d95b24e843581342c80c2552b69d6cfd223f 100644 (file)
@@ -1305,6 +1305,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
                size_t start;
                ssize_t ret = iov_iter_get_pages(ii,
                                        &req->pages[req->num_pages],
+                                       *nbytesp - nbytes,
                                        req->max_pages - req->num_pages,
                                        &start);
                if (ret < 0)
index e6ee5b6e8d99379e97cf5d777892dbd1065e595d..f0b945ab853e488a852a1a8459e9aa873def54b9 100644 (file)
@@ -359,7 +359,7 @@ static inline void release_metapath(struct metapath *mp)
  * Returns: The length of the extent (minimum of one block)
  */
 
-static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
+static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
 {
        const __be64 *end = (start + len);
        const __be64 *first = ptr;
@@ -449,7 +449,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
                           struct buffer_head *bh_map, struct metapath *mp,
                           const unsigned int sheight,
                           const unsigned int height,
-                          const unsigned int maxlen)
+                          const size_t maxlen)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -483,7 +483,8 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
        } else {
                /* Need to allocate indirect blocks */
                ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
-               dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
+               dblks = min(maxlen, (size_t)(ptrs_per_blk -
+                                            mp->mp_list[end_of_metadata]));
                if (height == ip->i_height) {
                        /* Writing into existing tree, extend tree down */
                        iblks = height - sheight;
@@ -605,7 +606,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned int bsize = sdp->sd_sb.sb_bsize;
-       const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
+       const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
        const u64 *arr = sdp->sd_heightsize;
        __be64 *ptr;
        u64 size;
index 26b3f952e6b19cccd2e0333f45498c5531a99c62..7f4ed3daa38c4a190132c1c1fa6ce1b1877e0b94 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
 #include <linux/aio.h>
+#include <linux/delay.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -979,9 +980,10 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
        unsigned int state;
        int flags;
        int error = 0;
+       int sleeptime;
 
        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
+       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
 
        mutex_lock(&fp->f_fl_mutex);
 
@@ -1001,7 +1003,14 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
-       error = gfs2_glock_nq(fl_gh);
+       for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
+               error = gfs2_glock_nq(fl_gh);
+               if (error != GLR_TRYFAILED)
+                       break;
+               fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
+               fl_gh->gh_error = 0;
+               msleep(sleeptime);
+       }
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
@@ -1024,7 +1033,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
        mutex_lock(&fp->f_fl_mutex);
        flock_lock_file_wait(file, fl);
        if (fl_gh->gh_gl) {
-               gfs2_glock_dq_wait(fl_gh);
+               gfs2_glock_dq(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
index 67d310c9ada3222ef1c08333397f65a29d1e454a..39e7e9959b7462f99a7d04dff4759445f581d9df 100644 (file)
@@ -262,6 +262,9 @@ struct gfs2_holder {
        unsigned long gh_ip;
 };
 
+/* Number of quota types we support */
+#define GFS2_MAXQUOTAS 2
+
 /* Resource group multi-block reservation, in order of appearance:
 
    Step 1. Function prepares to write, allocates a mb, sets the size hint.
@@ -282,8 +285,8 @@ struct gfs2_blkreserv {
        u64 rs_inum;                  /* Inode number for reservation */
 
        /* ancillary quota stuff */
-       struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
-       struct gfs2_holder rs_qa_qd_ghs[2 * MAXQUOTAS];
+       struct gfs2_quota_data *rs_qa_qd[2 * GFS2_MAXQUOTAS];
+       struct gfs2_holder rs_qa_qd_ghs[2 * GFS2_MAXQUOTAS];
        unsigned int rs_qa_qd_num;
 };
 
index e62e594778848ba313b17269cf9f3e7832aba0bb..fc8ac2ee0667c8d66082aa48f05f9acee452a8ac 100644 (file)
@@ -626,8 +626,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (!IS_ERR(inode)) {
                d = d_splice_alias(inode, dentry);
                error = PTR_ERR(d);
-               if (IS_ERR(d))
+               if (IS_ERR(d)) {
+                       inode = ERR_CAST(d);
                        goto fail_gunlock;
+               }
                error = 0;
                if (file) {
                        if (S_ISREG(inode->i_mode)) {
@@ -840,8 +842,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
        int error;
 
        inode = gfs2_lookupi(dir, &dentry->d_name, 0);
-       if (!inode)
+       if (inode == NULL) {
+               d_add(dentry, NULL);
                return NULL;
+       }
        if (IS_ERR(inode))
                return ERR_CAST(inode);
 
@@ -854,7 +858,6 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
 
        d = d_splice_alias(inode, dentry);
        if (IS_ERR(d)) {
-               iput(inode);
                gfs2_glock_dq_uninit(&gh);
                return d;
        }
index 2607ff13d4864536a3f60ca6e2343f39b58b1b93..a346f56c4c6df97dfbc1459b61a9f6d70e42bc11 100644 (file)
@@ -1294,7 +1294,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
        int val;
 
        if (is_ancestor(root, sdp->sd_master_dir))
-               seq_printf(s, ",meta");
+               seq_puts(s, ",meta");
        if (args->ar_lockproto[0])
                seq_printf(s, ",lockproto=%s", args->ar_lockproto);
        if (args->ar_locktable[0])
@@ -1302,13 +1302,13 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
        if (args->ar_hostdata[0])
                seq_printf(s, ",hostdata=%s", args->ar_hostdata);
        if (args->ar_spectator)
-               seq_printf(s, ",spectator");
+               seq_puts(s, ",spectator");
        if (args->ar_localflocks)
-               seq_printf(s, ",localflocks");
+               seq_puts(s, ",localflocks");
        if (args->ar_debug)
-               seq_printf(s, ",debug");
+               seq_puts(s, ",debug");
        if (args->ar_posix_acl)
-               seq_printf(s, ",acl");
+               seq_puts(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
                char *state;
                switch (args->ar_quota) {
@@ -1328,7 +1328,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",quota=%s", state);
        }
        if (args->ar_suiddir)
-               seq_printf(s, ",suiddir");
+               seq_puts(s, ",suiddir");
        if (args->ar_data != GFS2_DATA_DEFAULT) {
                char *state;
                switch (args->ar_data) {
@@ -1345,7 +1345,7 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",data=%s", state);
        }
        if (args->ar_discard)
-               seq_printf(s, ",discard");
+               seq_puts(s, ",discard");
        val = sdp->sd_tune.gt_logd_secs;
        if (val != 30)
                seq_printf(s, ",commit=%d", val);
@@ -1376,11 +1376,11 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",errors=%s", state);
        }
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
-               seq_printf(s, ",nobarrier");
+               seq_puts(s, ",nobarrier");
        if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
-               seq_printf(s, ",demote_interface_used");
+               seq_puts(s, ",demote_interface_used");
        if (args->ar_rgrplvb)
-               seq_printf(s, ",rgrplvb");
+               seq_puts(s, ",rgrplvb");
        return 0;
 }
 
index 8f27c93f8d2ed88edd55e623baac11b6f94ccfd5..ec9e082f9ecd905af16f2dd09de37822376cfe66 100644 (file)
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
 
        error = make_socks(serv, net);
        if (error < 0)
-               goto err_socks;
+               goto err_bind;
        set_grace_period(net);
        dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
-err_socks:
-       svc_rpcb_cleanup(serv, net);
 err_bind:
        ln->nlmsvc_users--;
        return error;
index a996bb48dfabf4f645dced56f527ed3d5568bb2c..a7b05bf82d31ad2e8eacbac999a049cab1f8d446 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
+#include <linux/hash.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -643,24 +644,22 @@ static int complete_walk(struct nameidata *nd)
 
 static __always_inline void set_root(struct nameidata *nd)
 {
-       if (!nd->root.mnt)
-               get_fs_root(current->fs, &nd->root);
+       get_fs_root(current->fs, &nd->root);
 }
 
 static int link_path_walk(const char *, struct nameidata *);
 
-static __always_inline void set_root_rcu(struct nameidata *nd)
+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
 {
-       if (!nd->root.mnt) {
-               struct fs_struct *fs = current->fs;
-               unsigned seq;
+       struct fs_struct *fs = current->fs;
+       unsigned seq, res;
 
-               do {
-                       seq = read_seqcount_begin(&fs->seq);
-                       nd->root = fs->root;
-                       nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
-       }
+       do {
+               seq = read_seqcount_begin(&fs->seq);
+               nd->root = fs->root;
+               res = __read_seqcount_begin(&nd->root.dentry->d_seq);
+       } while (read_seqcount_retry(&fs->seq, seq));
+       return res;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -860,7 +859,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
                        return PTR_ERR(s);
                }
                if (*s == '/') {
-                       set_root(nd);
+                       if (!nd->root.mnt)
+                               set_root(nd);
                        path_put(&nd->path);
                        nd->path = nd->root;
                        path_get(&nd->root);
@@ -1137,13 +1137,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 */
                *inode = path->dentry->d_inode;
        }
-       return read_seqretry(&mount_lock, nd->m_seq) &&
+       return !read_seqretry(&mount_lock, nd->m_seq) &&
                !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
 }
 
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
-       set_root_rcu(nd);
+       struct inode *inode = nd->inode;
+       if (!nd->root.mnt)
+               set_root_rcu(nd);
 
        while (1) {
                if (nd->path.dentry == nd->root.dentry &&
@@ -1155,6 +1157,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        struct dentry *parent = old->d_parent;
                        unsigned seq;
 
+                       inode = parent->d_inode;
                        seq = read_seqcount_begin(&parent->d_seq);
                        if (read_seqcount_retry(&old->d_seq, nd->seq))
                                goto failed;
@@ -1164,6 +1167,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                }
                if (!follow_up_rcu(&nd->path))
                        break;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
        }
        while (d_mountpoint(nd->path.dentry)) {
@@ -1173,11 +1177,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        break;
                nd->path.mnt = &mounted->mnt;
                nd->path.dentry = mounted->mnt.mnt_root;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-               if (!read_seqretry(&mount_lock, nd->m_seq))
+               if (read_seqretry(&mount_lock, nd->m_seq))
                        goto failed;
        }
-       nd->inode = nd->path.dentry->d_inode;
+       nd->inode = inode;
        return 0;
 
 failed:
@@ -1256,7 +1261,8 @@ static void follow_mount(struct path *path)
 
 static void follow_dotdot(struct nameidata *nd)
 {
-       set_root(nd);
+       if (!nd->root.mnt)
+               set_root(nd);
 
        while(1) {
                struct dentry *old = nd->path.dentry;
@@ -1634,8 +1640,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
-       hash += hash >> (8*sizeof(int));
-       return hash;
+       return hash_64(hash, 32);
 }
 
 #else  /* 32-bit case */
@@ -1669,9 +1674,9 @@ EXPORT_SYMBOL(full_name_hash);
 
 /*
  * Calculate the length and hash of the path component, and
- * return the length of the component;
+ * return the "hash_len" as the result.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline u64 hash_name(const char *name)
 {
        unsigned long a, b, adata, bdata, mask, hash, len;
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
@@ -1691,9 +1696,8 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
        mask = create_zero_mask(adata | bdata);
 
        hash += a & zero_bytemask(mask);
-       *hashp = fold_hash(hash);
-
-       return len + find_zero(mask);
+       len += find_zero(mask);
+       return hashlen_create(fold_hash(hash), len);
 }
 
 #else
@@ -1711,7 +1715,7 @@ EXPORT_SYMBOL(full_name_hash);
  * We know there's a real path component here of at least
  * one character.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline u64 hash_name(const char *name)
 {
        unsigned long hash = init_name_hash();
        unsigned long len = 0, c;
@@ -1722,8 +1726,7 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
                hash = partial_name_hash(c, hash);
                c = (unsigned char)name[len];
        } while (c && c != '/');
-       *hashp = end_name_hash(hash);
-       return len;
+       return hashlen_create(end_name_hash(hash), len);
 }
 
 #endif
@@ -1748,20 +1751,17 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 
        /* At this point we know we have a real path component. */
        for(;;) {
-               struct qstr this;
-               long len;
+               u64 hash_len;
                int type;
 
                err = may_lookup(nd);
                if (err)
                        break;
 
-               len = hash_name(name, &this.hash);
-               this.name = name;
-               this.len = len;
+               hash_len = hash_name(name);
 
                type = LAST_NORM;
-               if (name[0] == '.') switch (len) {
+               if (name[0] == '.') switch (hashlen_len(hash_len)) {
                        case 2:
                                if (name[1] == '.') {
                                        type = LAST_DOTDOT;
@@ -1775,29 +1775,32 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                        struct dentry *parent = nd->path.dentry;
                        nd->flags &= ~LOOKUP_JUMPED;
                        if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
+                               struct qstr this = { { .hash_len = hash_len }, .name = name };
                                err = parent->d_op->d_hash(parent, &this);
                                if (err < 0)
                                        break;
+                               hash_len = this.hash_len;
+                               name = this.name;
                        }
                }
 
-               nd->last = this;
+               nd->last.hash_len = hash_len;
+               nd->last.name = name;
                nd->last_type = type;
 
-               if (!name[len])
+               name += hashlen_len(hash_len);
+               if (!*name)
                        return 0;
                /*
                 * If it wasn't NUL, we know it was '/'. Skip that
                 * slash, and continue until no more slashes.
                 */
                do {
-                       len++;
-               } while (unlikely(name[len] == '/'));
-               if (!name[len])
+                       name++;
+               } while (unlikely(*name == '/'));
+               if (!*name)
                        return 0;
 
-               name += len;
-
                err = walk_component(nd, &next, LOOKUP_FOLLOW);
                if (err < 0)
                        return err;
@@ -1852,7 +1855,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        if (*name=='/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
-                       set_root_rcu(nd);
+                       nd->seq = set_root_rcu(nd);
                } else {
                        set_root(nd);
                        path_get(&nd->root);
@@ -1903,7 +1906,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        }
 
        nd->inode = nd->path.dentry->d_inode;
-       return 0;
+       if (!(flags & LOOKUP_RCU))
+               return 0;
+       if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
+               return 0;
+       if (!(nd->flags & LOOKUP_ROOT))
+               nd->root.mnt = NULL;
+       rcu_read_unlock();
+       return -ECHILD;
 }
 
 static inline int lookup_last(struct nameidata *nd, struct path *path)
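
The link_path_walk() hunks above replace the separate length/hash handling with a single packed 64-bit hash_len value built by hashlen_create() and unpacked with hashlen_len(). As a rough illustration of that packing convention, here is a small standalone sketch; the helper macros below are local assumptions modelled on the hashlen_* helpers (length in the high 32 bits, hash in the low 32 bits), not code taken from this patch.

/* Illustrative only: pack a 32-bit hash and a length into one u64. */
#include <stdio.h>
#include <stdint.h>

#define MY_HASHLEN_CREATE(hash, len) (((uint64_t)(len) << 32) | (uint32_t)(hash))
#define MY_HASHLEN_LEN(hashlen)      ((uint32_t)((hashlen) >> 32))
#define MY_HASHLEN_HASH(hashlen)     ((uint32_t)(hashlen))

int main(void)
{
	/* A hypothetical component hash and name length. */
	uint64_t hash_len = MY_HASHLEN_CREATE(0xdeadbeef, 7);

	printf("len=%u hash=%#x\n", MY_HASHLEN_LEN(hash_len),
	       MY_HASHLEN_HASH(hash_len));
	return 0;
}
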
index 1c5ff6d5838585c4b6b0806aa64b579e58a64bc0..6a4f3666e273e33cab30fb4de866ebf79f791dac 100644 (file)
@@ -1412,24 +1412,18 @@ int nfs_fs_proc_net_init(struct net *net)
        p = proc_create("volumes", S_IFREG|S_IRUGO,
                        nn->proc_nfsfs, &nfs_volume_list_fops);
        if (!p)
-               goto error_2;
+               goto error_1;
        return 0;
 
-error_2:
-       remove_proc_entry("servers", nn->proc_nfsfs);
 error_1:
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 error_0:
        return -ENOMEM;
 }
 
 void nfs_fs_proc_net_exit(struct net *net)
 {
-       struct nfs_net *nn = net_generic(net, nfs_net_id);
-
-       remove_proc_entry("volumes", nn->proc_nfsfs);
-       remove_proc_entry("servers", nn->proc_nfsfs);
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 }
 
 /*
index 1359c4a27393a6723fc3b22c244dd6422da4a9f3..90978075f7302a791813b6dd29c0ec9f9e5850eb 100644 (file)
@@ -1269,11 +1269,12 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
 static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
 {
        struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
-       struct pnfs_commit_bucket *bucket = fl_cinfo->buckets;
+       struct pnfs_commit_bucket *bucket;
        struct pnfs_layout_segment *freeme;
        int i;
 
-       for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) {
+       for (i = idx; i < fl_cinfo->nbuckets; i++) {
+               bucket = &fl_cinfo->buckets[i];
                if (list_empty(&bucket->committing))
                        continue;
                nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
index 92193eddb41dc315868af5f437f083a3a4c0302a..a8b855ab4e227b7d9b2c90d911f27ef4d49f9fb7 100644 (file)
@@ -130,16 +130,15 @@ enum {
  */
 
 struct nfs4_lock_state {
-       struct list_head                ls_locks;   /* Other lock stateids */
-       struct nfs4_state *             ls_state;   /* Pointer to open state */
+       struct list_head        ls_locks;       /* Other lock stateids */
+       struct nfs4_state *     ls_state;       /* Pointer to open state */
 #define NFS_LOCK_INITIALIZED 0
 #define NFS_LOCK_LOST        1
-       unsigned long                   ls_flags;
+       unsigned long           ls_flags;
        struct nfs_seqid_counter        ls_seqid;
-       nfs4_stateid                    ls_stateid;
-       atomic_t                        ls_count;
-       fl_owner_t                      ls_owner;
-       struct work_struct              ls_release;
+       nfs4_stateid            ls_stateid;
+       atomic_t                ls_count;
+       fl_owner_t              ls_owner;
 };
 
 /* bits for nfs4_state->flags */
index 53e435a952602aa5037cc8a9605b399f646edc81..ffdb28d86cf81636e22d6e2899a677fc967772d2 100644 (file)
@@ -482,6 +482,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos" */
                if (pos->cl_cons_state > NFS_CS_READY) {
@@ -501,15 +511,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
@@ -622,6 +623,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos", especially the client
                 * ID and serverowner fields.  Wait for CREATE_SESSION
@@ -647,15 +658,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
index 7dd8aca31c29b9c0079dce94136e70d716072f78..6ca0c8e7a9453d24585aba5e2d9546554353b58f 100644 (file)
@@ -2226,9 +2226,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        ret = _nfs4_proc_open(opendata);
        if (ret != 0) {
                if (ret == -ENOENT) {
-                       d_drop(opendata->dentry);
-                       d_add(opendata->dentry, NULL);
-                       nfs_set_verifier(opendata->dentry,
+                       dentry = opendata->dentry;
+                       if (dentry->d_inode)
+                               d_delete(dentry);
+                       else if (d_unhashed(dentry))
+                               d_add(dentry, NULL);
+
+                       nfs_set_verifier(dentry,
                                         nfs_save_change_attribute(opendata->dir->d_inode));
                }
                goto out;
@@ -2614,23 +2618,23 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
        is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
        is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
-       /* Calculate the current open share mode */
-       calldata->arg.fmode = 0;
-       if (is_rdonly || is_rdwr)
-               calldata->arg.fmode |= FMODE_READ;
-       if (is_wronly || is_rdwr)
-               calldata->arg.fmode |= FMODE_WRITE;
        /* Calculate the change in open mode */
+       calldata->arg.fmode = 0;
        if (state->n_rdwr == 0) {
-               if (state->n_rdonly == 0) {
-                       call_close |= is_rdonly || is_rdwr;
-                       calldata->arg.fmode &= ~FMODE_READ;
-               }
-               if (state->n_wronly == 0) {
-                       call_close |= is_wronly || is_rdwr;
-                       calldata->arg.fmode &= ~FMODE_WRITE;
-               }
-       }
+               if (state->n_rdonly == 0)
+                       call_close |= is_rdonly;
+               else if (is_rdonly)
+                       calldata->arg.fmode |= FMODE_READ;
+               if (state->n_wronly == 0)
+                       call_close |= is_wronly;
+               else if (is_wronly)
+                       calldata->arg.fmode |= FMODE_WRITE;
+       } else if (is_rdwr)
+               calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+
+       if (calldata->arg.fmode == 0)
+               call_close |= is_rdwr;
+
        if (!nfs4_valid_open_stateid(state))
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
index a043f618cd5a30ef35a8ec63d54ff12034a2387f..22fe35104c0c1cb30b1d1a27c9996380710a00c6 100644 (file)
@@ -799,18 +799,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
        return NULL;
 }
 
-static void
-free_lock_state_work(struct work_struct *work)
-{
-       struct nfs4_lock_state *lsp = container_of(work,
-                                       struct nfs4_lock_state, ls_release);
-       struct nfs4_state *state = lsp->ls_state;
-       struct nfs_server *server = state->owner->so_server;
-       struct nfs_client *clp = server->nfs_client;
-
-       clp->cl_mvops->free_lock_state(server, lsp);
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -832,7 +820,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
        if (lsp->ls_seqid.owner_id < 0)
                goto out_free;
        INIT_LIST_HEAD(&lsp->ls_locks);
-       INIT_WORK(&lsp->ls_release, free_lock_state_work);
        return lsp;
 out_free:
        kfree(lsp);
@@ -896,12 +883,13 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
-       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags))
-               queue_work(nfsiod_workqueue, &lsp->ls_release);
-       else {
-               server = state->owner->so_server;
+       server = state->owner->so_server;
+       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
+               struct nfs_client *clp = server->nfs_client;
+
+               clp->cl_mvops->free_lock_state(server, lsp);
+       } else
                nfs4_free_lock_state(server, lsp);
-       }
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
index f9821ce6658a4b09006e7d51d575319658753501..b01f6e100ee80e6d26c111a2eeaa63d6be8f639a 100644 (file)
@@ -2657,6 +2657,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        struct xdr_stream *xdr = cd->xdr;
        int start_offset = xdr->buf->len;
        int cookie_offset;
+       u32 name_and_cookie;
        int entry_bytes;
        __be32 nfserr = nfserr_toosmall;
        __be64 wire_offset;
@@ -2718,7 +2719,14 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        cd->rd_maxcount -= entry_bytes;
        if (!cd->rd_dircount)
                goto fail;
-       cd->rd_dircount--;
+       /*
+        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+        * let's always let through the first entry, at least:
+        */
+       name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+       if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+               goto fail;
+       cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
        cd->cookie_offset = cookie_offset;
 skip_entry:
        cd->common.err = nfs_ok;
@@ -3096,7 +3104,8 @@ static __be32 nfsd4_encode_splice_read(
 
        buf->page_len = maxcount;
        buf->len += maxcount;
-       xdr->page_ptr += (maxcount + PAGE_SIZE - 1) / PAGE_SIZE;
+       xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
+                                                       / PAGE_SIZE;
 
        /* Use rest of head for padding and remaining ops: */
        buf->tail[0].iov_base = xdr->p;
@@ -3321,6 +3330,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
        }
        maxcount = min_t(int, maxcount-16, bytes_left);
 
+       /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+       if (!readdir->rd_dircount)
+               readdir->rd_dircount = INT_MAX;
+
        readdir->xdr = xdr;
        readdir->rd_maxcount = maxcount;
        readdir->common.err = 0;
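
The two readdir hunks above turn rd_dircount into the hint RFC 3530 14.2.24
describes: a zero dircount from the client means "unlimited", and each entry is
charged 8 bytes of cookie plus the XDR-padded name, never underflowing. A minimal
standalone sketch of that per-entry charge, assuming XDR_QUADLEN(l) expands to
((l) + 3) >> 2 as in the sunrpc headers:

/* Standalone sketch of the readdir dircount accounting above. */
#include <stdio.h>

#define XDR_QUADLEN(l)  (((l) + 3) >> 2)

static unsigned int charge_entry(unsigned int dircount, int namlen)
{
        /* 8 bytes of cookie plus the XDR-padded name, in bytes */
        unsigned int name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;

        /* dircount is only a hint: clamp to zero instead of underflowing */
        return dircount - (name_and_cookie < dircount ? name_and_cookie : dircount);
}

int main(void)
{
        unsigned int dircount = 64;
        const int namlens[] = { 5, 30, 100 };

        for (int i = 0; i < 3; i++) {
                dircount = charge_entry(dircount, namlens[i]);
                printf("after namlen %d: dircount=%u\n", namlens[i], dircount);
        }
        return 0;
}
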
index 6252b173a46590225e2ba29f7e07cf13aa62eeac..d071e7f23de2a767929338309a5e5c73b5942f97 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
+#include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/aio.h>
 #include "nilfs.h"
@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int nilfs_set_page_dirty(struct page *page)
 {
+       struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);
 
        if (page_has_buffers(page)) {
-               struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;
 
@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page)
 
                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
+       } else if (ret) {
+               unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+               nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
 }
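
The nilfs hunk above also accounts a freshly dirtied buffer-less page as a full
page of blocks: nr_dirty = 1 << (PAGE_CACHE_SHIFT - i_blkbits). A standalone
check of that count, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12); the block
sizes are only illustrative:

/* How many blocks does one dirtied page cover? */
#include <stdio.h>

int main(void)
{
        const unsigned int page_cache_shift = 12;     /* 4 KiB pages */
        const unsigned int blkbits[] = { 9, 10, 12 }; /* 512 B, 1 KiB, 4 KiB blocks */

        for (int i = 0; i < 3; i++)
                printf("blocksize=%u -> %u blocks per page\n",
                       1u << blkbits[i], 1u << (page_cache_shift - blkbits[i]));
        return 0;
}
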
index 238a5930cb3c7d16e1c76952e66c6bf24f5299ae..9d7e2b9659cbdf2687e26bbbf0dc784ba7cced6d 100644 (file)
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
 {
        struct {
                struct file_handle handle;
-               u8 pad[64];
+               u8 pad[MAX_HANDLE_SZ];
        } f;
        int size, ret, i;
 
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
        size = f.handle.handle_bytes >> 2;
 
        ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-       if ((ret == 255) || (ret == -ENOSPC)) {
+       if ((ret == FILEID_INVALID) || (ret < 0)) {
                WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
                return 0;
        }
index 3ec906ef5d9a622ff4b130f12907d503bd89c3e0..12ba682fc53c303964ee4eb2c0035c88765573d0 100644 (file)
@@ -655,12 +655,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
        clear_bit(bit, res->refmap);
 }
 
-
-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
-       assert_spin_locked(&res->spinlock);
-
        res->inflight_locks++;
 
        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
@@ -668,6 +665,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
             __builtin_return_address(0));
 }
 
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       __dlm_lockres_grab_inflight_ref(dlm, res);
+}
+
 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
@@ -894,10 +898,8 @@ lookup:
        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
 
-       /* Grab inflight ref to pin the resource */
-       spin_lock(&res->spinlock);
-       dlm_lockres_grab_inflight_ref(dlm, res);
-       spin_unlock(&res->spinlock);
+       /* since this lockres is new it doesn't require the spinlock */
+       __dlm_lockres_grab_inflight_ref(dlm, res);
 
        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
@@ -2037,6 +2039,10 @@ kill:
             "and killing the other node now!  This node is OK and can continue.\n");
        __dlm_print_one_lock_resource(res);
        spin_unlock(&res->spinlock);
+       spin_lock(&dlm->master_lock);
+       if (mle)
+               __dlm_put_mle(mle);
+       spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
        *ret_data = (void *)res;
        dlm_put(dlm);
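
The dlm refactor above is the common locked/__unlocked helper split:
dlm_lockres_grab_inflight_ref() keeps the assert_spin_locked() check for normal
callers, while __dlm_lockres_grab_inflight_ref() serves the one caller whose
lockres is brand new and not yet visible to other CPUs. A rough userspace
analogue of the pattern, with a pthread mutex standing in for the spinlock and a
trylock standing in for assert_spin_locked():

/* Userspace analogue of the locked/__unlocked helper split above. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct res {
        pthread_mutex_t lock;
        unsigned int inflight;
};

static void __res_grab_inflight(struct res *r)
{
        r->inflight++;
}

static void res_grab_inflight(struct res *r)
{
        /* crude stand-in for assert_spin_locked(): trylock must fail */
        assert(pthread_mutex_trylock(&r->lock) != 0);
        __res_grab_inflight(r);
}

int main(void)
{
        struct res r = { .lock = PTHREAD_MUTEX_INITIALIZER, .inflight = 0 };

        /* freshly created, not yet published: no locking needed */
        __res_grab_inflight(&r);

        pthread_mutex_lock(&r.lock);
        res_grab_inflight(&r);
        pthread_mutex_unlock(&r.lock);

        printf("inflight=%u\n", r.inflight);
        return 0;
}
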
index ddb662b32447ca49cd206c3f4e1394ee558ceeb3..4142546aedae95e668487b9ba8069be1ec929227 100644 (file)
@@ -2532,6 +2532,7 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
        kfree(osb->journal);
        kfree(osb->local_alloc_copy);
        kfree(osb->uuid_str);
+       kfree(osb->vol_label);
        ocfs2_put_dlm_debug(osb->osb_dlm_debug);
        memset(osb, 0, sizeof(struct ocfs2_super));
 }
index dfc791c42d6491c6d1a4537e6ee5d7c9bd21a24b..c34156888d706d444a45fd171299ff9548bab93c 100644 (file)
@@ -931,23 +931,32 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-               unsigned long vm_end;
+               /* End of address space hole, which we mark as non-present. */
+               unsigned long hole_end;
 
-               if (!vma) {
-                       vm_end = end;
-               } else {
-                       vm_end = min(end, vma->vm_end);
-                       if (vma->vm_flags & VM_SOFTDIRTY)
-                               pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+               if (vma)
+                       hole_end = min(end, vma->vm_start);
+               else
+                       hole_end = end;
+
+               for (; addr < hole_end; addr += PAGE_SIZE) {
+                       err = add_to_pagemap(addr, &pme, pm);
+                       if (err)
+                               goto out;
                }
 
-               for (; addr < vm_end; addr += PAGE_SIZE) {
+               if (!vma)
+                       break;
+
+               /* Addresses in the VMA. */
+               if (vma->vm_flags & VM_SOFTDIRTY)
+                       pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+               for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }
        }
-
 out:
        return err;
 }
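
The pagemap rewrite above splits each page-table hole in two: addresses before
the next VMA are reported as plain not-present, while hole addresses inside the
VMA inherit its VM_SOFTDIRTY flag; the old code keyed the split off vm_end and
so tagged the wrong range. A standalone sketch of the two-loop walk with one
fake mapping and 4 KiB pages:

/* Standalone sketch of the hole-then-VMA walk above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct vma { unsigned long start, end; };

/* like find_vma(): first mapping whose end is above addr, else NULL */
static const struct vma *find_vma(unsigned long addr)
{
        static const struct vma v = { 0x4000, 0x8000 };

        return addr < v.end ? &v : NULL;
}

int main(void)
{
        unsigned long addr = 0x2000, end = 0xa000;

        while (addr < end) {
                const struct vma *vma = find_vma(addr);
                unsigned long hole_end = vma && vma->start < end ? vma->start : end;

                for (; addr < hole_end; addr += PAGE_SIZE)
                        printf("%#lx not present (outside any vma)\n", addr);
                if (!vma)
                        break;
                for (; addr < (vma->end < end ? vma->end : end); addr += PAGE_SIZE)
                        printf("%#lx not present (inside vma, takes its flags)\n", addr);
        }
        return 0;
}
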
index 6eaf5edf1ea1577e88cafc60184963e1b18df5a5..e77db621ec8985ad21878c45e970ebc3f23258cb 100644 (file)
@@ -45,7 +45,7 @@ void udf_free_inode(struct inode *inode)
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
 
-struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode)
 {
        struct super_block *sb = dir->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
@@ -55,14 +55,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct logicalVolIntegrityDescImpUse *lvidiu;
+       int err;
 
        inode = new_inode(sb);
 
-       if (!inode) {
-               *err = -ENOMEM;
-               return NULL;
-       }
-       *err = -ENOSPC;
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
 
        iinfo = UDF_I(inode);
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
@@ -80,21 +78,22 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        }
        if (!iinfo->i_ext.i_data) {
                iput(inode);
-               *err = -ENOMEM;
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       err = -ENOSPC;
        block = udf_new_block(dir->i_sb, NULL,
                              dinfo->i_location.partitionReferenceNum,
-                             start, err);
-       if (*err) {
+                             start, &err);
+       if (err) {
                iput(inode);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        lvidiu = udf_sb_lvidiu(sb);
        if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
+               inode->i_generation = iinfo->i_unique;
                mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
@@ -123,9 +122,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
        inode->i_mtime = inode->i_atime = inode->i_ctime =
                iinfo->i_crtime = current_fs_time(inode->i_sb);
-       insert_inode_hash(inode);
+       if (unlikely(insert_inode_locked(inode) < 0)) {
+               make_bad_inode(inode);
+               iput(inode);
+               return ERR_PTR(-EIO);
+       }
        mark_inode_dirty(inode);
 
-       *err = 0;
        return inode;
 }
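
udf_new_inode() above stops reporting failures through an int *err out-parameter
and instead encodes them in the returned pointer, which is why every caller
later in this diff switches to IS_ERR()/PTR_ERR(). A standalone sketch of that
convention, with the three helpers reduced from include/linux/err.h:

/* Errors travel in the pointer itself instead of a separate *err. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *new_object(int fail)
{
        if (fail)
                return ERR_PTR(-ENOSPC);        /* no out-parameter needed */
        return malloc(16);
}

int main(void)
{
        void *obj = new_object(1);

        if (IS_ERR(obj)) {
                printf("allocation failed: %ld\n", PTR_ERR(obj));
                return 1;
        }
        free(obj);
        return 0;
}
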
index 236cd48184c2df20e75bc9fee098ded782f2560f..08598843288fe0c2dd206940d6437c606ef41c1f 100644 (file)
@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
 
 static umode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
-static void udf_fill_inode(struct inode *, struct buffer_head *);
 static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
@@ -1271,12 +1270,33 @@ update_time:
        return 0;
 }
 
-static void __udf_read_inode(struct inode *inode)
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
+static int udf_read_inode(struct inode *inode)
 {
        struct buffer_head *bh = NULL;
        struct fileEntry *fe;
+       struct extendedFileEntry *efe;
        uint16_t ident;
        struct udf_inode_info *iinfo = UDF_I(inode);
+       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+       struct kernel_lb_addr *iloc = &iinfo->i_location;
+       unsigned int link_count;
+       unsigned int indirections = 0;
+       int ret = -EIO;
+
+reread:
+       if (iloc->logicalBlockNum >=
+           sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
+               udf_debug("block=%d, partition=%d out of range\n",
+                         iloc->logicalBlockNum, iloc->partitionReferenceNum);
+               return -EIO;
+       }
 
        /*
         * Set defaults, but the inode is still incomplete!
@@ -1290,78 +1310,54 @@ static void __udf_read_inode(struct inode *inode)
         *      i_nlink = 1
         *      i_op = NULL;
         */
-       bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
+       bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
        if (!bh) {
                udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
-               make_bad_inode(inode);
-               return;
+               return -EIO;
        }
 
        if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
            ident != TAG_IDENT_USE) {
                udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
                        inode->i_ino, ident);
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
 
        fe = (struct fileEntry *)bh->b_data;
+       efe = (struct extendedFileEntry *)bh->b_data;
 
        if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
                struct buffer_head *ibh;
 
-               ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
-                                       &ident);
+               ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
                if (ident == TAG_IDENT_IE && ibh) {
-                       struct buffer_head *nbh = NULL;
                        struct kernel_lb_addr loc;
                        struct indirectEntry *ie;
 
                        ie = (struct indirectEntry *)ibh->b_data;
                        loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-                       if (ie->indirectICB.extLength &&
-                               (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-                                                       &ident))) {
-                               if (ident == TAG_IDENT_FE ||
-                                       ident == TAG_IDENT_EFE) {
-                                       memcpy(&iinfo->i_location,
-                                               &loc,
-                                               sizeof(struct kernel_lb_addr));
-                                       brelse(bh);
-                                       brelse(ibh);
-                                       brelse(nbh);
-                                       __udf_read_inode(inode);
-                                       return;
+                       if (ie->indirectICB.extLength) {
+                               brelse(ibh);
+                               memcpy(&iinfo->i_location, &loc,
+                                      sizeof(struct kernel_lb_addr));
+                               if (++indirections > UDF_MAX_ICB_NESTING) {
+                                       udf_err(inode->i_sb,
+                                               "too many ICBs in ICB hierarchy"
+                                               " (max %d supported)\n",
+                                               UDF_MAX_ICB_NESTING);
+                                       goto out;
                                }
-                               brelse(nbh);
+                               brelse(bh);
+                               goto reread;
                        }
                }
                brelse(ibh);
        } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
                udf_err(inode->i_sb, "unsupported strategy type: %d\n",
                        le16_to_cpu(fe->icbTag.strategyType));
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
-       udf_fill_inode(inode, bh);
-
-       brelse(bh);
-}
-
-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
-{
-       struct fileEntry *fe;
-       struct extendedFileEntry *efe;
-       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
-       struct udf_inode_info *iinfo = UDF_I(inode);
-       unsigned int link_count;
-
-       fe = (struct fileEntry *)bh->b_data;
-       efe = (struct extendedFileEntry *)bh->b_data;
-
        if (fe->icbTag.strategyType == cpu_to_le16(4))
                iinfo->i_strat4096 = 0;
        else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
@@ -1378,11 +1374,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
                iinfo->i_efe = 1;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct extendedFileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct extendedFileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct extendedFileEntry),
                       inode->i_sb->s_blocksize -
@@ -1390,11 +1385,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
                iinfo->i_efe = 0;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                               sizeof(struct fileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                               sizeof(struct fileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct fileEntry),
                       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
@@ -1404,18 +1398,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(
                                ((struct unallocSpaceEntry *)bh->b_data)->
                                 lengthAllocDescs);
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct unallocSpaceEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct unallocSpaceEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct unallocSpaceEntry),
                       inode->i_sb->s_blocksize -
                                        sizeof(struct unallocSpaceEntry));
-               return;
+               return 0;
        }
 
+       ret = -EIO;
        read_lock(&sbi->s_cred_lock);
        i_uid_write(inode, le32_to_cpu(fe->uid));
        if (!uid_valid(inode->i_uid) ||
@@ -1441,8 +1435,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        read_unlock(&sbi->s_cred_lock);
 
        link_count = le16_to_cpu(fe->fileLinkCount);
-       if (!link_count)
-               link_count = 1;
+       if (!link_count) {
+               ret = -ESTALE;
+               goto out;
+       }
        set_nlink(inode, link_count);
 
        inode->i_size = le64_to_cpu(fe->informationLength);
@@ -1488,6 +1484,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
                iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
        }
+       inode->i_generation = iinfo->i_unique;
 
        switch (fe->icbTag.fileType) {
        case ICBTAG_FILE_TYPE_DIRECTORY:
@@ -1537,8 +1534,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        default:
                udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
                        inode->i_ino, fe->icbTag.fileType);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                struct deviceSpec *dsea =
@@ -1549,8 +1545,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                                      le32_to_cpu(dsea->minorDeviceIdent)));
                        /* Developer ID ??? */
                } else
-                       make_bad_inode(inode);
+                       goto out;
        }
+       ret = 0;
+out:
+       brelse(bh);
+       return ret;
 }
 
 static int udf_alloc_i_data(struct inode *inode, size_t size)
@@ -1664,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
        fe->permissions = cpu_to_le32(udfperms);
 
-       if (S_ISDIR(inode->i_mode))
+       if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
        else
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
@@ -1830,32 +1830,23 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
 {
        unsigned long block = udf_get_lb_pblock(sb, ino, 0);
        struct inode *inode = iget_locked(sb, block);
+       int err;
 
        if (!inode)
-               return NULL;
-
-       if (inode->i_state & I_NEW) {
-               memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
-               __udf_read_inode(inode);
-               unlock_new_inode(inode);
-       }
+               return ERR_PTR(-ENOMEM);
 
-       if (is_bad_inode(inode))
-               goto out_iput;
+       if (!(inode->i_state & I_NEW))
+               return inode;
 
-       if (ino->logicalBlockNum >= UDF_SB(sb)->
-                       s_partmaps[ino->partitionReferenceNum].s_partition_len) {
-               udf_debug("block=%d, partition=%d out of range\n",
-                         ino->logicalBlockNum, ino->partitionReferenceNum);
-               make_bad_inode(inode);
-               goto out_iput;
+       memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+       err = udf_read_inode(inode);
+       if (err < 0) {
+               iget_failed(inode);
+               return ERR_PTR(err);
        }
+       unlock_new_inode(inode);
 
        return inode;
-
- out_iput:
-       iput(inode);
-       return NULL;
 }
 
 int udf_add_aext(struct inode *inode, struct extent_position *epos,
index 83a06001742bfaab6781bc26509517196fc3af16..c12e260fd6c417eb9c690782b8860f0e5eeff8d9 100644 (file)
@@ -270,9 +270,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
                                                NULL, 0),
                };
                inode = udf_iget(dir->i_sb, lb);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return inode;
        } else
 #endif /* UDF_RECOVERY */
 
@@ -285,9 +284,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 
                loc = lelb_to_cpu(cfi.icb.extLocation);
                inode = udf_iget(dir->i_sb, &loc);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return ERR_CAST(inode);
        }
 
        return d_splice_alias(inode, dentry);
@@ -550,32 +548,18 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
        return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
 }
 
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-                     bool excl)
+static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
 {
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       struct inode *dir = dentry->d_parent->d_inode;
        struct udf_fileident_bh fibh;
-       struct inode *inode;
        struct fileIdentDesc cfi, *fi;
        int err;
-       struct udf_inode_info *iinfo;
-
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode) {
-               return err;
-       }
-
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               inode->i_data.a_ops = &udf_adinicb_aops;
-       else
-               inode->i_data.a_ops = &udf_aops;
-       inode->i_op = &udf_file_inode_operations;
-       inode->i_fop = &udf_file_operations;
-       mark_inode_dirty(inode);
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
+       if (unlikely(!fi)) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                return err;
        }
@@ -589,23 +573,21 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
        brelse(fibh.sbh);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
        return 0;
 }
 
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+                     bool excl)
 {
-       struct inode *inode;
-       struct udf_inode_info *iinfo;
-       int err;
+       struct inode *inode = udf_new_inode(dir, mode);
 
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               return err;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                inode->i_data.a_ops = &udf_adinicb_aops;
        else
                inode->i_data.a_ops = &udf_aops;
@@ -613,7 +595,25 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_fop = &udf_file_operations;
        mark_inode_dirty(inode);
 
+       return udf_add_nondir(dentry, inode);
+}
+
+static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       struct inode *inode = udf_new_inode(dir, mode);
+
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
+
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+               inode->i_data.a_ops = &udf_adinicb_aops;
+       else
+               inode->i_data.a_ops = &udf_aops;
+       inode->i_op = &udf_file_inode_operations;
+       inode->i_fop = &udf_file_operations;
+       mark_inode_dirty(inode);
        d_tmpfile(dentry, inode);
+       unlock_new_inode(inode);
        return 0;
 }
 
@@ -621,44 +621,16 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                     dev_t rdev)
 {
        struct inode *inode;
-       struct udf_fileident_bh fibh;
-       struct fileIdentDesc cfi, *fi;
-       int err;
-       struct udf_inode_info *iinfo;
 
        if (!old_valid_dev(rdev))
                return -EINVAL;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
        init_special_inode(inode, mode, rdev);
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
-               inode_dec_link_count(inode);
-               iput(inode);
-               return err;
-       }
-       cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-               cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       mark_inode_dirty(inode);
-
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
-out:
-       return err;
+       return udf_add_nondir(dentry, inode);
 }
 
 static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -670,10 +642,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct udf_inode_info *iinfo;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, S_IFDIR | mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, S_IFDIR | mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        inode->i_op = &udf_dir_inode_operations;
@@ -681,6 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
        if (!fi) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -699,6 +671,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (!fi) {
                clear_nlink(inode);
                mark_inode_dirty(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -710,6 +683,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
        inc_nlink(dir);
        mark_inode_dirty(dir);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -876,14 +850,11 @@ out:
 static int udf_symlink(struct inode *dir, struct dentry *dentry,
                       const char *symname)
 {
-       struct inode *inode;
+       struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
        struct pathComponent *pc;
        const char *compstart;
-       struct udf_fileident_bh fibh;
        struct extent_position epos = {};
        int eoffset, elen = 0;
-       struct fileIdentDesc *fi;
-       struct fileIdentDesc cfi;
        uint8_t *ea;
        int err;
        int block;
@@ -892,9 +863,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        struct udf_inode_info *iinfo;
        struct super_block *sb = dir->i_sb;
 
-       inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
-       if (!inode)
-               goto out;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        down_write(&iinfo->i_data_sem);
@@ -1012,32 +982,15 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        mark_inode_dirty(inode);
        up_write(&iinfo->i_data_sem);
 
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi)
-               goto out_fail;
-       cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       if (UDF_SB(inode->i_sb)->s_lvid_bh) {
-               *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-                       cpu_to_le32(lvid_get_unique_id(sb));
-       }
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
+       err = udf_add_nondir(dentry, inode);
 out:
        kfree(name);
        return err;
 
 out_no_entry:
        up_write(&iinfo->i_data_sem);
-out_fail:
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -1222,7 +1175,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
        struct udf_fileident_bh fibh;
 
        if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
-               goto out_unlock;
+               return ERR_PTR(-EACCES);
 
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -1230,12 +1183,10 @@ static struct dentry *udf_get_parent(struct dentry *child)
 
        tloc = lelb_to_cpu(cfi.icb.extLocation);
        inode = udf_iget(child->d_inode->i_sb, &tloc);
-       if (!inode)
-               goto out_unlock;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        return d_obtain_alias(inode);
-out_unlock:
-       return ERR_PTR(-EACCES);
 }
 
 
@@ -1252,8 +1203,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
        loc.partitionReferenceNum = partref;
        inode = udf_iget(sb, &loc);
 
-       if (inode == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        if (generation && inode->i_generation != generation) {
                iput(inode);
index 813da94d447b3b44418d2ce84bc70121cb62c92e..5401fc33f5cc906b76b846e62f74751cabd50a68 100644 (file)
@@ -961,12 +961,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 
        metadata_fe = udf_iget(sb, &addr);
 
-       if (metadata_fe == NULL)
+       if (IS_ERR(metadata_fe)) {
                udf_warn(sb, "metadata inode efe not found\n");
-       else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
+               return metadata_fe;
+       }
+       if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
                udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
                iput(metadata_fe);
-               metadata_fe = NULL;
+               return ERR_PTR(-EIO);
        }
 
        return metadata_fe;
@@ -978,6 +980,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        struct udf_part_map *map;
        struct udf_meta_data *mdata;
        struct kernel_lb_addr addr;
+       struct inode *fe;
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
@@ -986,22 +989,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        udf_debug("Metadata file location: block = %d part = %d\n",
                  mdata->s_meta_file_loc, map->s_partition_num);
 
-       mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
-               mdata->s_meta_file_loc, map->s_partition_num);
-
-       if (mdata->s_metadata_fe == NULL) {
+       fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
+                                        map->s_partition_num);
+       if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
                          mdata->s_mirror_file_loc, map->s_partition_num);
 
-               mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                       mdata->s_mirror_file_loc, map->s_partition_num);
+               fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
+                                                map->s_partition_num);
 
-               if (mdata->s_mirror_fe == NULL) {
+               if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-                       return -EIO;
+                       return PTR_ERR(fe);
                }
-       }
+               mdata->s_mirror_fe = fe;
+       } else
+               mdata->s_metadata_fe = fe;
+
 
        /*
         * bitmap file entry
@@ -1015,15 +1020,16 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
 
-               mdata->s_bitmap_fe = udf_iget(sb, &addr);
-               if (mdata->s_bitmap_fe == NULL) {
+               fe = udf_iget(sb, &addr);
+               if (IS_ERR(fe)) {
                        if (sb->s_flags & MS_RDONLY)
                                udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
                        else {
                                udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-                               return -EIO;
+                               return PTR_ERR(fe);
                        }
-               }
+               } else
+                       mdata->s_bitmap_fe = fe;
        }
 
        udf_debug("udf_load_metadata_files Ok\n");
@@ -1111,13 +1117,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->unallocSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_uspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_uspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load unallocSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
+               map->s_uspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
                udf_debug("unallocSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_uspace.s_table->i_ino);
@@ -1144,14 +1152,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->freedSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_fspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_fspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load freedSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
-
+               map->s_fspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
                udf_debug("freedSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_fspace.s_table->i_ino);
@@ -1178,6 +1187,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        struct udf_part_map *map = &sbi->s_partmaps[p_index];
        sector_t vat_block;
        struct kernel_lb_addr ino;
+       struct inode *inode;
 
        /*
         * VAT file entry is in the last recorded block. Some broken disks have
@@ -1186,10 +1196,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        ino.partitionReferenceNum = type1_index;
        for (vat_block = start_block;
             vat_block >= map->s_partition_root &&
-            vat_block >= start_block - 3 &&
-            !sbi->s_vat_inode; vat_block--) {
+            vat_block >= start_block - 3; vat_block--) {
                ino.logicalBlockNum = vat_block - map->s_partition_root;
-               sbi->s_vat_inode = udf_iget(sb, &ino);
+               inode = udf_iget(sb, &ino);
+               if (!IS_ERR(inode)) {
+                       sbi->s_vat_inode = inode;
+                       break;
+               }
        }
 }
 
@@ -2205,10 +2218,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        /* assign inodes by physical block number */
        /* perhaps it's not extensible enough, but for now ... */
        inode = udf_iget(sb, &rootdir);
-       if (!inode) {
+       if (IS_ERR(inode)) {
                udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
                       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
-               ret = -EIO;
+               ret = PTR_ERR(inode);
                goto error_out;
        }
 
index be7dabbbcb49ef171b33e0ec2bc4a672c073282d..742557be9936399fba764f645c04b969d2a3cfd5 100644 (file)
@@ -143,7 +143,6 @@ extern int udf_expand_file_adinicb(struct inode *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
 extern int udf_setsize(struct inode *, loff_t);
-extern void udf_read_inode(struct inode *);
 extern void udf_evict_inode(struct inode *);
 extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
 extern long udf_block_map(struct inode *, sector_t);
@@ -209,7 +208,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t);
 
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
index a9cc75ffa925f995c35bc592651681ee3ff63e24..7caa016528883c6d8b8e7234ef0adc7ce23c6044 100644 (file)
@@ -298,7 +298,10 @@ cg_found:
        ufsi->i_oeftflag = 0;
        ufsi->i_dir_start_lookup = 0;
        memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
-       insert_inode_hash(inode);
+       if (insert_inode_locked(inode) < 0) {
+               err = -EIO;
+               goto failed;
+       }
        mark_inode_dirty(inode);
 
        if (uspi->fs_magic == UFS2_MAGIC) {
@@ -337,6 +340,7 @@ cg_found:
 fail_remove_inode:
        unlock_ufs(sb);
        clear_nlink(inode);
+       unlock_new_inode(inode);
        iput(inode);
        UFSD("EXIT (FAILED): err %d\n", err);
        return ERR_PTR(err);
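
Like the udf hunks, the ufs allocator above moves from insert_inode_hash() to
insert_inode_locked(), which hashes the inode with I_NEW set; that is why every
failure path in these files grows an unlock_new_inode() before iput(). A
kernel-style sketch of the resulting shape (the names are illustrative, not code
from this series):

#include <linux/err.h>
#include <linux/fs.h>

/* Illustrative only: the shape shared by the ufs/udf allocators above. */
static struct inode *example_new_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode = new_inode(sb);

        if (!inode)
                return ERR_PTR(-ENOMEM);

        inode->i_mode = mode;
        inode->i_ino = 1234;    /* hypothetical inode number */

        if (insert_inode_locked(inode) < 0) {
                /* never hashed, never locked: just drop it */
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(-EIO);
        }

        mark_inode_dirty(inode);
        /*
         * I_NEW is now set: the caller must call unlock_new_inode() on every
         * path, including error paths, before the final iput().
         */
        return inode;
}
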
index 2df62a73f20ce2071c17d81c343ada85ae287d2a..fd65deb4b5f095e3103028b83f214ffba73707f7 100644 (file)
@@ -38,10 +38,12 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
 {
        int err = ufs_add_link(dentry, inode);
        if (!err) {
+               unlock_new_inode(inode);
                d_instantiate(dentry, inode);
                return 0;
        }
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        return err;
 }
@@ -155,6 +157,7 @@ out_notlocked:
 
 out_fail:
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -210,6 +213,7 @@ out:
 out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput (inode);
        inode_dec_link_count(dir);
        unlock_ufs(dir->i_sb);
index c1c9de19edbe8abc62f2508bf9a7723bd9027d08..57ee0528aacb16db912a11571c518bd0fc9eb5b5 100644 (file)
@@ -118,6 +118,7 @@ struct acpi_device;
 struct acpi_hotplug_profile {
        struct kobject kobj;
        int (*scan_dependent)(struct acpi_device *adev);
+       void (*notify_online)(struct acpi_device *adev);
        bool enabled:1;
        bool demand_offline:1;
 };
@@ -204,10 +205,9 @@ struct acpi_device_flags {
        u32 match_driver:1;
        u32 initialized:1;
        u32 visited:1;
-       u32 no_hotplug:1;
        u32 hotplug_notify:1;
        u32 is_dock_station:1;
-       u32 reserved:22;
+       u32 reserved:23;
 };
 
 /* File System */
@@ -411,7 +411,6 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
 int acpi_bus_get_private_data(acpi_handle, void **);
 int acpi_bus_attach_private_data(acpi_handle, void *);
 void acpi_bus_detach_private_data(acpi_handle);
-void acpi_bus_no_hotplug(acpi_handle handle);
 extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
 extern int register_acpi_notifier(struct notifier_block *);
 extern int unregister_acpi_notifier(struct notifier_block *);
index 831d786976c56fe64c71fa7f3f6c400ea4ca7eb4..882675e7c055d8248c724081b199b77e3785066b 100644 (file)
@@ -162,12 +162,25 @@ static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
 
 static inline size_t drbg_max_addtl(struct drbg_state *drbg)
 {
+#if (__BITS_PER_LONG == 32)
+       /*
+        * SP800-90A allows smaller maximum numbers to be returned -- we
+        * return SIZE_MAX - 1 to allow the verification of the enforcement
+        * of this value in drbg_healthcheck_sanity.
+        */
+       return (SIZE_MAX - 1);
+#else
        return (1UL<<(drbg->core->max_addtllen));
+#endif
 }
 
 static inline size_t drbg_max_requests(struct drbg_state *drbg)
 {
+#if (__BITS_PER_LONG == 32)
+       return SIZE_MAX;
+#else
        return (1UL<<(drbg->core->max_req));
+#endif
 }
 
 /*
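
The 32-bit special cases above exist because SP800-90A limits such as 2^35
cannot be represented in a 32-bit size_t, so the header clamps them to SIZE_MAX
(and SIZE_MAX - 1 for the additional-input limit, so the sanity test can still
see the bound being enforced). A generalized standalone illustration of the
clamping; the exponents are only examples:

/* Clamp a power-of-two limit that may not fit in size_t. */
#include <stdint.h>
#include <stdio.h>

static size_t max_request(unsigned int log2_limit)
{
        if (log2_limit >= sizeof(size_t) * 8)   /* would overflow size_t */
                return SIZE_MAX;
        return (size_t)1 << log2_limit;
}

int main(void)
{
        printf("limit 2^19 -> %zu\n", max_request(19));
        printf("limit 2^35 -> %zu\n", max_request(35)); /* SIZE_MAX on 32-bit */
        return 0;
}
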
index ebcc9d1462198d990a9a473f0fecd435045f7b53..7f437036baa4bec9efa4fd1f8b619f9849889218 100644 (file)
@@ -26,6 +26,13 @@ struct ccp_cmd;
 #if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
        defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
 #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
 
+static inline int ccp_present(void)
+{
+       return -ENODEV;
+}
+
 static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
        return -ENODEV;
index ade2390ffe92baec361677b16f700de9f379ee74..6e39c9bb0dae2b7c2f6738139aba7fed96b74549 100644 (file)
@@ -93,12 +93,12 @@ extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
-       return current->flags & PF_SPREAD_PAGE;
+       return task_spread_page(current);
 }
 
 static inline int cpuset_do_slab_mem_spread(void)
 {
-       return current->flags & PF_SPREAD_SLAB;
+       return task_spread_slab(current);
 }
 
 extern int current_cpuset_is_being_rebound(void);
index e4ae2ad48d072efcd78e3905c3f1e212130c88cc..75a227cc7ce20a70f9a7e066059a5b8317696c82 100644 (file)
@@ -55,6 +55,7 @@ struct qstr {
 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
 #define hashlen_hash(hashlen) ((u32) (hashlen))
 #define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
 
 struct dentry_stat_t {
        long nr_dentry;
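
hashlen_create() above is the inverse of the two accessors next to it: the
length lives in the high 32 bits and the hash in the low 32 bits of a single
u64. A standalone round-trip check using the same three macros over fixed-width
types:

/* Pack and unpack a (hash, len) pair in one 64-bit word. */
#include <stdint.h>
#include <stdio.h>

#define hashlen_hash(hashlen)  ((uint32_t)(hashlen))
#define hashlen_len(hashlen)   ((uint32_t)((hashlen) >> 32))
#define hashlen_create(hash, len) (((uint64_t)(len) << 32) | (uint32_t)(hash))

int main(void)
{
        uint64_t hl = hashlen_create(0xdeadbeef, 11);

        printf("hash=%#x len=%u\n",
               (unsigned)hashlen_hash(hl), (unsigned)hashlen_len(hl));
        return 0;
}
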
index bd1754c7ecef49a121a74fea6efaac15234384a5..d0494c399392c801dfce96d8a141d935d750a22e 100644 (file)
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
        n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
        hash += n;
        n <<= 2;
        hash += n;
+#endif
 
        /* High bits are more random, so use them. */
        return hash >> (64 - bits);
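
The new CONFIG_ARCH_HAS_FAST_MULTIPLIER branch does not change the hash: the
shift-and-add chain it bypasses is a hand-expanded multiplication by
GOLDEN_RATIO_PRIME_64, so a single multiply produces an identical result on
CPUs where 64-bit multiplies are cheap. A standalone check of that equivalence
(the chain below is reproduced from the same header):

/* Verify that multiply and shift/add mixing agree. */
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

static uint64_t mix_shift_add(uint64_t hash)
{
        uint64_t n = hash;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;
        return hash;
}

int main(void)
{
        uint64_t v = 0x123456789abcdef0ULL;

        printf("multiply : %#llx\n", (unsigned long long)(v * GOLDEN_RATIO_PRIME_64));
        printf("shift/add: %#llx\n", (unsigned long long)mix_shift_add(v));
        return 0;
}
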
index a95efeb53a8b76da08450f47f614f9713dc89a9f..b556e0ab946fbffd18d246435dc65663e188db54 100644 (file)
@@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
 }
 #endif /* CONFIG_OF */
 
-#ifdef CONFIG_ACPI
-void acpi_i2c_register_devices(struct i2c_adapter *adap);
-#else
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
-#endif /* CONFIG_ACPI */
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
-#else
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{ return 0; }
-#endif /* CONFIG_ACPI_I2C_OPREGION */
-
 #endif /* _LINUX_I2C_H */
index 4b79ffe7b18868a65e95849ad128e5f096760cb7..fa76c79a52a1bf5c209e6cd8a8df7dc2cad66d28 100644 (file)
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
        put_device(&trig->dev);
 }
 
-static inline void iio_trigger_get(struct iio_trigger *trig)
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
 {
        get_device(&trig->dev);
        __module_get(trig->ops->owner);
+
+       return trig;
 }
 
 /**
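
Having iio_trigger_get() return the trigger lets a caller take its reference and
store the pointer in one expression. A short kernel-style sketch of the calling
pattern this enables (illustrative driver code, not taken from this series):

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

/* Illustrative only: grab a reference and publish the pointer in one step. */
static void example_attach_default_trigger(struct iio_dev *indio_dev,
                                            struct iio_trigger *trig)
{
        indio_dev->trig = iio_trigger_get(trig);
}
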
index 1f44466c1e9d7b056cbdcd4f7c043018f069fad4..c367cbdf73ab1a5b83f1af48c848be21b466167d 100644 (file)
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
 #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
 #endif
 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
 
 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                         TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION  \
-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
- * for more details.  It is the scaled resolution rounding value.  Note
- * that it is a 64-bit value.  Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
 /*
  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
  * into seconds.  The 64-bit case will overflow if we are not careful,
index 071f6b234604cf770ebfab55e1ec61392e364a83..a5b7d7cfcedfaac0fe3f3ca4aabfd1dae91b2b53 100644 (file)
@@ -209,6 +209,7 @@ enum {
        MLX4_BMME_FLAG_TYPE_2_WIN       = 1 <<  9,
        MLX4_BMME_FLAG_RESERVED_LKEY    = 1 << 10,
        MLX4_BMME_FLAG_FAST_REG_WR      = 1 << 11,
+       MLX4_BMME_FLAG_VSD_INIT2RTR     = 1 << 28,
 };
 
 enum mlx4_event {
@@ -1196,6 +1197,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
                                  enum mlx4_net_trans_rule_id id);
 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+                         int port, int qpn, u16 prio, u64 *reg_id);
+
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
                          int i, int val);
 
index 7040dc98ff8baa59118cd42dd728d3152afb7c86..5f4e36cf00910c7220b743fd06f5471cdd7dc441 100644 (file)
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
        MLX4_QP_OPTPAR_RNR_RETRY                = 1 << 13,
        MLX4_QP_OPTPAR_ACK_TIMEOUT              = 1 << 14,
        MLX4_QP_OPTPAR_SCHED_QUEUE              = 1 << 16,
-       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20
+       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20,
+       MLX4_QP_OPTPAR_VLAN_STRIPPING           = 1 << 21,
 };
 
 enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
+       MLX4_UPDATE_QP_VSD              = 1 << 2,
+       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+       MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE          = 1 << 0,
 };
 
 struct mlx4_update_qp_params {
        u8      smac_index;
+       u32     flags;
 };
 
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
index 38377392d08261291ceb1fcd177718484e05581c..c8e388e5fcccfa604d087ce33ccee5ace528e598 100644 (file)
@@ -3176,7 +3176,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
 }
 
 /**
- *  __dev_uc_unsync - Remove synchonized addresses from device
+ *  __dev_uc_unsync - Remove synchronized addresses from device
  *  @dev:  device to sync
  *  @unsync: function to call if address should be removed
  *
@@ -3220,7 +3220,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
 }
 
 /**
- *  __dev_mc_unsync - Remove synchonized addresses from device
+ *  __dev_mc_unsync - Remove synchronized addresses from device
  *  @dev:  device to sync
  *  @unsync: function to call if address should be removed
  *
index 2077489f98873bcbe4ca083cd1f0eb1b99eeab5e..2517ece988209a611b324a0bb8ade2b566eeb645 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/in6.h>
 #include <linux/wait.h>
 #include <linux/list.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter.h>
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-#if defined(CONFIG_JUMP_LABEL)
-#include <linux/static_key.h>
+#ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
        if (__builtin_constant_p(pf) &&
index 61978a4608412896a3f199024c9c7b7d349c152d..96453f9bc8bafa947d87a64b1102f8115ad43ea9 100644 (file)
@@ -303,6 +303,7 @@ struct pci_dev {
                                                   D3cold, not set for devices
                                                   powered on/off by the
                                                   corresponding bridge */
+       unsigned int    ignore_hotplug:1;       /* Ignore hotplug events */
        unsigned int    d3_delay;       /* D3->D0 transition time in ms */
        unsigned int    d3cold_delay;   /* D3cold->D0 transition time in ms */
 
@@ -1021,6 +1022,11 @@ bool pci_dev_run_wake(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 
+static inline void pci_ignore_hotplug(struct pci_dev *dev)
+{
+       dev->ignore_hotplug = 1;
+}
+
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                                  bool enable)
 {
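
pci_ignore_hotplug() above lets a driver that powers a device down on its own
(dual-GPU vga_switcheroo setups are the motivating case) tell the PCI hotplug
code to disregard the resulting presence-change events. A kernel-style sketch of
the intended call site (illustrative only):

#include <linux/pci.h>

/* Illustrative only: mark the device before cutting its power. */
static void example_power_down_gpu(struct pci_dev *pdev)
{
        pci_ignore_hotplug(pdev);               /* sets pdev->ignore_hotplug */
        pci_set_power_state(pdev, PCI_D3cold);
}
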
index 3dfbf237cd8f32fc684e2934e7a8ace0da043b14..ef5894ca8e503d9171f74cce2f63298c703333d3 100644 (file)
@@ -71,6 +71,7 @@ void percpu_ref_reinit(struct percpu_ref *ref);
 void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
+void __percpu_ref_kill_expedited(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
index 5c2c885ee52b3996a2665dc3d8c0e21ff9245aaf..b867a4dab38a2a4491f54b28b7aa2b809a4dad22 100644 (file)
@@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_KTHREAD     0x00200000      /* I am a kernel thread */
 #define PF_RANDOMIZE   0x00400000      /* randomize virtual address space */
 #define PF_SWAPWRITE   0x00800000      /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x01000000      /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x02000000      /* Spread some slab caches over cpuset */
 #define PF_NO_SETAFFINITY 0x04000000   /* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MUTEX_TESTER        0x20000000      /* Thread belongs to the rt mutex tester */
@@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags)
 }
 
 /* Per-process atomic flags. */
-#define PFA_NO_NEW_PRIVS 0x00000001    /* May not gain new privileges. */
+#define PFA_NO_NEW_PRIVS 0     /* May not gain new privileges. */
+#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
 
-static inline bool task_no_new_privs(struct task_struct *p)
-{
-       return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
 
-static inline void task_set_no_new_privs(struct task_struct *p)
-{
-       set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
+#define TASK_PFA_TEST(name, func)                                      \
+       static inline bool task_##func(struct task_struct *p)           \
+       { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func)                                       \
+       static inline void task_set_##func(struct task_struct *p)       \
+       { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func)                                     \
+       static inline void task_clear_##func(struct task_struct *p)     \
+       { clear_bit(PFA_##name, &p->atomic_flags); }
+
+TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
 
 /*
  * task->jobctl flags
@@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
        task_thread_info(p)->task = p;
 }
 
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
 static inline unsigned long *end_of_stack(struct task_struct *p)
 {
+#ifdef CONFIG_STACK_GROWSUP
+       return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
        return (unsigned long *)(task_thread_info(p) + 1);
+#endif
 }
 
 #endif
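
A common consumer of end_of_stack() is stack-overrun checking against the canary word stored at that location. A sketch under the assumption that STACK_END_MAGIC has been written there during task setup, as the kernel does for its own overflow checks; the helper name is hypothetical:

#include <linux/sched.h>
#include <linux/magic.h>        /* STACK_END_MAGIC */

/* Returns true if the canary at the end of @p's kernel stack has been
 * clobbered, which strongly suggests the stack overflowed. */
static bool example_stack_overrun(struct task_struct *p)
{
        return *end_of_stack(p) != STACK_END_MAGIC;
}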
index 48d64e6ab29279a5d46e57114b8272b7a2069c4f..290fbf0b6b8ac71736bfc92eec4d00003db912f5 100644 (file)
@@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
 ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
-                       unsigned maxpages, size_t *start);
+                       size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
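
With the added maxsize argument, callers bound the pinned range by a byte budget as well as by the capacity of the page array. A hedged caller sketch; the function name and the limits are made up:

#include <linux/uio.h>

/* Pin at most 16 pages and at most 64 KiB from @iter; *offset receives
 * the byte offset into the first returned page. */
static ssize_t example_pin_user_pages(struct iov_iter *iter,
                                      struct page **pages, size_t *offset)
{
        return iov_iter_get_pages(iter, pages, 64 * 1024, 16, offset);
}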
index 502073a53dd320b115925a654ef7dabdbd49872e..b483abd344934f9cb9827d112883bfdf8e861d2b 100644 (file)
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
 int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
 #else
 
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
 static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
 static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
 
 #endif
index 2c02f3a8d2ba3f4ba079a51f537be25742b17162..c37bd4d06739034cf9592305a152f9ed6eb35db3 100644 (file)
@@ -182,7 +182,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
  *     vga_get()...
  */
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 #ifdef CONFIG_VGA_ARB
 extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
@@ -190,7 +189,6 @@ extern void vga_set_default_device(struct pci_dev *pdev);
 static inline struct pci_dev *vga_default_device(void) { return NULL; };
 static inline void vga_set_default_device(struct pci_dev *pdev) { };
 #endif
-#endif
 
 /**
  *     vga_conflicts
index a0cc2e95ed1b1ab3324abc7f0ffe44e51973645d..b996e6cde6bb652383b726ee4be6af3b0f8e4d8b 100644 (file)
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
                        1, (name))
 #define create_singlethread_workqueue(name)                            \
-       alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
index fc910a622451b2bb112bdd5ba784a7cadfcbb315..2fefcf491aa853f8ed868a92f39e8da594422c9b 100644 (file)
@@ -295,7 +295,7 @@ struct vb2_buffer {
  *                     can return an error if hardware fails, in that case all
  *                     buffers that have been already given by the @buf_queue
  *                     callback are to be returned by the driver by calling
- *                     @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED).
+ *                     @vb2_buffer_done(VB2_BUF_STATE_QUEUED).
  *                     If you need a minimum number of buffers before you can
  *                     start streaming, then set @min_buffers_needed in the
  *                     vb2_queue structure. If that is non-zero then
@@ -380,6 +380,9 @@ struct v4l2_fh;
  * @start_streaming_called: start_streaming() was called successfully and we
  *             started streaming.
  * @error:     a fatal error occurred on the queue
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+ *             buffers. Only set for capture queues if qbuf has not yet been
+ *             called since poll() needs to return POLLERR in that situation.
  * @fileio:    file io emulator internal data, used only if emulator is active
  * @threadio:  thread io internal data, used only if thread is active
  */
@@ -417,6 +420,7 @@ struct vb2_queue {
        unsigned int                    streaming:1;
        unsigned int                    start_streaming_called:1;
        unsigned int                    error:1;
+       unsigned int                    waiting_for_buffers:1;
 
        struct vb2_fileio_data          *fileio;
        struct vb2_threadio_data        *threadio;
index f679877bb6017dd4a6da7d1fe0e6ea217ba3b3e4..ec51e673b4b67bf762cd3e606709f34069ae1a55 100644 (file)
@@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk);
 
 int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
 int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+void ipv6_ac_destroy_dev(struct inet6_dev *idev);
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr);
 bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
index b5d5af3aa469961af36b30b0aea2da1d4e959bac..6f884e6c731e41daccdf4119cb1f0f8a4ee9335f 100644 (file)
@@ -464,6 +464,8 @@ struct hci_conn_params {
                HCI_AUTO_CONN_ALWAYS,
                HCI_AUTO_CONN_LINK_LOSS,
        } auto_connect;
+
+       struct hci_conn *conn;
 };
 
 extern struct list_head hci_dev_list;
index 71c60f42be486b71d3f01d7e128a77101ada6432..a8ae4e760778d8fe49ff21951bd1e088ac3aefca 100644 (file)
@@ -480,6 +480,7 @@ void dst_init(void);
 /* Flags for xfrm_lookup flags argument. */
 enum {
        XFRM_LOOKUP_ICMP = 1 << 0,
+       XFRM_LOOKUP_QUEUE = 1 << 1,
 };
 
 struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
                                            int flags)
 {
        return dst_orig;
-} 
+}
+
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
+                                                 struct dst_entry *dst_orig,
+                                                 const struct flowi *fl,
+                                                 struct sock *sk,
+                                                 int flags)
+{
+       return dst_orig;
+}
 
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                              const struct flowi *fl, struct sock *sk,
                              int flags);
 
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl, struct sock *sk,
+                                   int flags);
+
 /* skb attached with this dst needs transformation if dst->xfrm is valid */
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
index 93695f0e22a54a7e4fa16f6934b1e085e75e1a7b..af10c2cf8a1dce00d9cc5414f67d5917bf911570 100644 (file)
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
        return netlink_set_err(net->genl_sock, portid, group, code);
 }
 
+static inline int genl_has_listeners(struct genl_family *family,
+                                    struct sock *sk, unsigned int group)
+{
+       if (WARN_ON_ONCE(group >= family->n_mcgrps))
+               return -EINVAL;
+       group = family->mcgrp_offset + group;
+       return netlink_has_listeners(sk, group);
+}
 #endif /* __NET_GENERIC_NETLINK_H */
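
The helper lets generic netlink users skip building a notification when nobody is subscribed to the multicast group. A minimal sketch with a hypothetical example_* function and error handling elided:

#include <net/genetlink.h>
#include <net/net_namespace.h>

static void example_notify(struct genl_family *family, struct net *net,
                           unsigned int group)
{
        /* A negative return means the group index was invalid; zero means
         * no subscribed listeners. Either way there is nothing to send. */
        if (genl_has_listeners(family, net->genl_sock, group) <= 0)
                return;

        /* ... allocate an skb, fill attributes, genlmsg_multicast() ... */
}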
index 9bcb220bd4ad13538ed0ee2d51546b1463cd116b..cf485f9aa56369fec2d47d82ca0cb5741ffca30e 100644 (file)
@@ -114,16 +114,13 @@ struct rt6_info {
        u32                             rt6i_flags;
        struct rt6key                   rt6i_src;
        struct rt6key                   rt6i_prefsrc;
-       u32                             rt6i_metric;
 
        struct inet6_dev                *rt6i_idev;
        unsigned long                   _rt6i_peer;
 
-       u32                             rt6i_genid;
-
+       u32                             rt6i_metric;
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
-
        u8                              rt6i_protocol;
 };
 
index 361d26077196678af5bd7ce217715a2cc847f2af..e0d64667a4b3e3be0cb9f70d45f02988fc19b21f 100644 (file)
@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net)
        atomic_inc(&net->ipv4.rt_genid);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int rt_genid_ipv6(struct net *net)
-{
-       return atomic_read(&net->ipv6.rt_genid);
-}
-
-static inline void rt_genid_bump_ipv6(struct net *net)
-{
-       atomic_inc(&net->ipv6.rt_genid);
-}
-#else
-static inline int rt_genid_ipv6(struct net *net)
-{
-       return 0;
-}
-
+extern void (*__fib6_flush_trees)(struct net *net);
 static inline void rt_genid_bump_ipv6(struct net *net)
 {
+       if (__fib6_flush_trees)
+               __fib6_flush_trees(net);
 }
-#endif
 
 #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
 static inline struct netns_ieee802154_lowpan *
index e2070960bac009223c1c6caa1324d6323a478a7d..8170f8d7052b21f5b8e82d8cb7c1c52a56042378 100644 (file)
@@ -16,7 +16,6 @@ struct netns_sysctl_lowpan {
 struct netns_ieee802154_lowpan {
        struct netns_sysctl_lowpan sysctl;
        struct netns_frags      frags;
-       int                     max_dsize;
 };
 
 #endif
index 259992444e80ae0b88eaec4ff345d23fd8f81c75..dad7ab20a8cb204f08b2ecda724d5c1db5138ec8 100644 (file)
@@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       char alpha2[2];
+       char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
 };
index a3cfb8ebeb53e2f233ea894fbe7e5a3835255c48..620e086c0cbeca36ab268abdaad9839d45f8a58d 100644 (file)
@@ -231,7 +231,8 @@ struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
-       unsigned char           data[24];
+#define QDISC_CB_PRIV_LEN 20
+       unsigned char           data[QDISC_CB_PRIV_LEN];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
index f6e7397e799dbd9dd1007c8f4e494c4b3405d28e..9fbd856e67139f9525b2951b147ed6f9223f7540 100644 (file)
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
        return asoc ? asoc->assoc_id : 0;
 }
 
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+       /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+        * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+        * start at =1 in user space, but actually as =0 in kernel space.
+        * Now that we can not break user space and SCTP_EMPTY is exposed
+        * there, we need to fix it up with an ugly offset not to break
+        * applications. :(
+        */
+       return asoc->state + 1;
+}
+
 /* Look up the association by its id.  */
 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
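
Concretely, the +1 shifts the kernel-internal enumeration onto the uapi one: a kernel-space closed association (state 0) must be reported as SCTP_CLOSED (1), because SCTP_EMPTY still occupies 0 in the uapi. A sketch of the intended use when filling SCTP_STATUS; the example_* function is hypothetical:

#include <net/sctp/sctp.h>

static void example_fill_status(struct sctp_status *status,
                                const struct sctp_association *asoc)
{
        /* Report the uapi value, not the raw kernel enum. */
        status->sstat_state = sctp_assoc_to_state(asoc);
}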
 
index 7f2ab72f321a4bd437800ab551a5aff3a9eab44a..b9a5bd0ed9f3a6b7bc0d087b2b2ae5563b4a0304 100644 (file)
@@ -2165,9 +2165,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
         */
        if (sock_flag(sk, SOCK_RCVTSTAMP) ||
            (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
-           (kt.tv64 &&
-            (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
-             skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+           (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
            (hwtstamps->hwtstamp.tv64 &&
             (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
                __sock_recv_timestamp(msg, sk, skb);
index e52ef5357e088b568e958de0d8545953ab90cef5..c52b68577cb0edc0d00732cc67441a4ac30595be 100644 (file)
@@ -290,7 +290,7 @@ struct wimax_dev;
  *     This operation has to be synchronous, and return only when the
  *     reset is complete. In case of having had to resort to bus/cold
  *     reset implying a device disconnection, the call is allowed to
- *     return inmediately.
+ *     return immediately.
  *     NOTE: wimax_dev->mutex is NOT locked when this op is being
  *     called; however, wimax_dev->mutex_reset IS locked to ensure
  *     serialization of calls to wimax_reset().
index 1ea0b65c4cfb8b4f70e52de3ab5a6eab91a16ebc..a2bf41e0bde969c0ea0090a6af6a11087f51fa2f 100644 (file)
@@ -47,6 +47,7 @@ struct ib_umem {
        int                     writable;
        int                     hugetlb;
        struct work_struct      work;
+       struct pid             *pid;
        struct mm_struct       *mm;
        unsigned long           diff;
        struct sg_table sg_head;
index cdcc90b07ecba7ec8bb675bdb1e1b26498f77a24..e64583560701bb1f2f0038717457321597b39e85 100644 (file)
@@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
                return;
 
        if (!shost_use_blk_mq(sdev->host) &&
-           blk_queue_tagged(sdev->request_queue))
+           !blk_queue_tagged(sdev->request_queue))
                blk_queue_init_tags(sdev->request_queue, depth,
                                    sdev->host->bqt);
 
index 1c09820df58564f8d0430d996998edc6f8d893c0..3608bebd3d9c5e58a0349c6240a8670bd3d8bbfb 100644 (file)
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
  * @vec_nr:  softirq vector number
  *
  * When used in combination with the softirq_exit tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
  */
 DEFINE_EVENT(softirq, softirq_entry,
 
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
  * @vec_nr:  softirq vector number
  *
  * When used in combination with the softirq_entry tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
  */
 DEFINE_EVENT(softirq, softirq_exit,
 
index 24e9033f8b3f9f10fde3467b9122d59c9621edcd..be88166349a128ca80e521ee2cef80ca8617a702 100644 (file)
@@ -240,6 +240,7 @@ header-y += matroxfb.h
 header-y += mdio.h
 header-y += media.h
 header-y += mei.h
+header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
 header-y += mic_common.h
@@ -395,6 +396,7 @@ header-y += un.h
 header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
+header-y += usbip.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
index 19df18c9b8be371ec5d13fc09f69fc33f2ff7769..1874ebe9ac1e505b0215a90f03a9b2ce57f6b2ff 100644 (file)
@@ -165,6 +165,7 @@ struct input_keymap_entry {
 #define INPUT_PROP_BUTTONPAD           0x02    /* has button(s) under pad */
 #define INPUT_PROP_SEMI_MT             0x03    /* touch rectangle only */
 #define INPUT_PROP_TOPBUTTONPAD                0x04    /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK      0x05    /* is a pointing stick */
 
 #define INPUT_PROP_MAX                 0x1f
 #define INPUT_PROP_CNT                 (INPUT_PROP_MAX + 1)
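
Input drivers advertise the new property by setting the corresponding bit in the device's propbit map before registration. A brief sketch with a hypothetical example_* function and registration details elided:

#include <linux/input.h>

static int example_setup_trackpoint(struct input_dev *input)
{
        /* Mark the device as a pointing stick so userspace (e.g. libinput)
         * can apply trackpoint-specific handling. */
        __set_bit(INPUT_PROP_POINTING_STICK, input->propbit);
        __set_bit(EV_REL, input->evbit);

        return input_register_device(input);
}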
index 131a6ccdba25693e6899b7813d99a6934e846ce4..14334d0161d5db26746af589920d10757209a5fb 100644 (file)
@@ -53,6 +53,9 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity        12
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
index e84c6423a2e5a2dbe80157b13f8b16d17c3e2d06..80a6907f91c5d0fd6578cc3a2b82a5f090602773 100644 (file)
@@ -811,6 +811,7 @@ config LOG_BUF_SHIFT
        int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
        range 12 21
        default 17
+       depends on PRINTK
        help
          Select the minimal kernel log buffer size as a power of 2.
          The final size is affected by LOG_CPU_MAX_BUF_SHIFT config
@@ -830,6 +831,7 @@ config LOG_CPU_MAX_BUF_SHIFT
        range 0 21
        default 12 if !BASE_SMALL
        default 0 if BASE_SMALL
+       depends on PRINTK
        help
          This option allows to increase the default ring buffer size
          according to the number of CPUs. The value defines the contribution
@@ -1475,6 +1477,7 @@ config FUTEX
 
 config HAVE_FUTEX_CMPXCHG
        bool
+       depends on FUTEX
        help
          Architectures should select this if futex_atomic_cmpxchg_inatomic()
          is implemented and always working. This removes a couple of runtime
index b6237c31b0e2469cac9b2287cecb6932ad49462e..82f22885c87e995c3d18f6a0080bf639d99d9331 100644 (file)
@@ -539,6 +539,12 @@ void __init prepare_namespace(void)
 {
        int is_floppy;
 
+       if (root_delay) {
+               printk(KERN_INFO "Waiting %d sec before mounting root device...\n",
+                      root_delay);
+               ssleep(root_delay);
+       }
+
        /*
         * wait for the known devices to complete their probing
         *
@@ -565,12 +571,6 @@ void __init prepare_namespace(void)
        if (initrd_load())
                goto out;
 
-       if (root_delay) {
-               pr_info("Waiting %d sec before mounting root device...\n",
-                       root_delay);
-               ssleep(root_delay);
-       }
-
        /* wait for any asynchronous scanning to complete */
        if ((ROOT_DEV == 0) && root_wait) {
                printk(KERN_INFO "Waiting for root device %s...\n",
index 7dc8788cfd52dd222856057840a8d0a45a19816e..3a73f995a81e6167659a7b488f23a4feab3d2787 100644 (file)
@@ -1035,6 +1035,11 @@ static void cgroup_get(struct cgroup *cgrp)
        css_get(&cgrp->self);
 }
 
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+       return css_tryget(&cgrp->self);
+}
+
 static void cgroup_put(struct cgroup *cgrp)
 {
        css_put(&cgrp->self);
@@ -1147,7 +1152,8 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
         * protection against removal.  Ensure @cgrp stays accessible and
         * break the active_ref protection.
         */
-       cgroup_get(cgrp);
+       if (!cgroup_tryget(cgrp))
+               return NULL;
        kernfs_break_active_protection(kn);
 
        mutex_lock(&cgroup_mutex);
@@ -3271,8 +3277,17 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype *cft;
 
-       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
-               cft->flags |= __CFTYPE_NOT_ON_DFL;
+       /*
+        * If legacy_files_on_dfl, we want to show the legacy files on the
+        * dfl hierarchy but iff the target subsystem hasn't been updated
+        * for the dfl hierarchy yet.
+        */
+       if (!cgroup_legacy_files_on_dfl ||
+           ss->dfl_cftypes != ss->legacy_cftypes) {
+               for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+                       cft->flags |= __CFTYPE_NOT_ON_DFL;
+       }
+
        return cgroup_add_cftypes(ss, cfts);
 }
 
@@ -3970,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 
        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
-               mutex_unlock(&cgrp->pidlist_mutex);
                pidlist_free(array);
                return -ENOMEM;
        }
@@ -4387,6 +4401,15 @@ static void css_release_work_fn(struct work_struct *work)
                /* cgroup release path */
                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
                cgrp->id = -1;
+
+               /*
+                * There are two control paths which try to determine
+                * cgroup from dentry without going through kernfs -
+                * cgroupstats_build() and css_tryget_online_from_dir().
+                * Those are supported by RCU protecting clearing of
+                * cgrp->kn->priv backpointer.
+                */
+               RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
        }
 
        mutex_unlock(&cgroup_mutex);
@@ -4543,6 +4566,11 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        struct cftype *base_files;
        int ssid, ret;
 
+       /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
+        */
+       if (strchr(name, '\n'))
+               return -EINVAL;
+
        parent = cgroup_kn_lock_live(parent_kn);
        if (!parent)
                return -ENODEV;
@@ -4820,16 +4848,6 @@ static int cgroup_rmdir(struct kernfs_node *kn)
 
        cgroup_kn_unlock(kn);
 
-       /*
-        * There are two control paths which try to determine cgroup from
-        * dentry without going through kernfs - cgroupstats_build() and
-        * css_tryget_online_from_dir().  Those are supported by RCU
-        * protecting clearing of cgrp->kn->priv backpointer, which should
-        * happen after all files under it have been removed.
-        */
-       if (!ret)
-               RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);
-
        cgroup_put(cgrp);
        return ret;
 }
@@ -5416,7 +5434,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
        /*
         * This path doesn't originate from kernfs and @kn could already
         * have been or be removed at any point.  @kn->priv is RCU
-        * protected for this access.  See cgroup_rmdir() for details.
+        * protected for this access.  See css_release_work_fn() for details.
         */
        cgrp = rcu_dereference(kn->priv);
        if (cgrp)
index 22874d7cf2c017c2be6b03b5c00eeff3250d411c..52cb04c993b7282fb61d68e7a5223598841d9ff4 100644 (file)
@@ -365,13 +365,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
 {
        if (is_spread_page(cs))
-               tsk->flags |= PF_SPREAD_PAGE;
+               task_set_spread_page(tsk);
        else
-               tsk->flags &= ~PF_SPREAD_PAGE;
+               task_clear_spread_page(tsk);
+
        if (is_spread_slab(cs))
-               tsk->flags |= PF_SPREAD_SLAB;
+               task_set_spread_slab(tsk);
        else
-               tsk->flags &= ~PF_SPREAD_SLAB;
+               task_clear_spread_slab(tsk);
 }
 
 /*
index f9c1ed002dbc81997e6e062f358c57b6e459ce84..963bf139e2b244fa1a4e2efb9a697e02b0c8dfa5 100644 (file)
@@ -1524,6 +1524,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -1967,6 +1972,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -7938,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child)
 
        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
-               if (ret)
+               if (ret) {
+                       perf_event_free_task(child);
                        return ret;
+               }
        }
 
        return 0;
index 0cf9cdb6e4919f254b32d04f6fdeb4943143a8d3..a91e47d86de214613fd4a2f04d3e7dea7509a7a7 100644 (file)
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_policy;
        retval = audit_alloc(p);
        if (retval)
-               goto bad_fork_cleanup_policy;
+               goto bad_fork_cleanup_perf;
        /* copy all the process information */
        shm_init_task(p);
        retval = copy_semundo(clone_flags, p);
@@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo:
        exit_sem(p);
 bad_fork_cleanup_audit:
        audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
        perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
index d3a9d946d0b7f918e5622a7c6e2ef8c9fe88775c..815d7af2ffe8c6b195e729625ef5bfecb399accb 100644 (file)
@@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         * shared futexes. We need to compare the keys:
         */
        if (match_futex(&q.key, &key2)) {
+               queue_unlock(hb);
                ret = -EINVAL;
                goto out_put_keys;
        }
index e30ac0fe61c3ded5533cb4db8e95b2d41dcf2ac8..0aa69ea1d8fdcfa68046aa75b03c4373783a02fa 100644 (file)
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
  */
 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
 {
-       long ret;
+       long t1, t2;
 
-       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+       t1 = kptr_obfuscate((long)v1, type);
+       t2 = kptr_obfuscate((long)v2, type);
 
-       return (ret < 0) | ((ret > 0) << 1);
+       return (t1 < t2) | ((t1 > t2) << 1);
 }
 
 /* The caller must have pinned the task */
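
The rewrite avoids the signed subtraction, whose result can overflow and flip sign when the two obfuscated values are far apart, so the old expression (ret < 0) | ((ret > 0) << 1) could misreport the ordering. The comparison-based form encodes the same three outcomes without that hazard; a standalone restatement for clarity:

/* 0: equal, 1: t1 < t2, 2: t1 > t2, exactly as kcmp(2) documents. */
static int example_threeway_cmp(long t1, long t2)
{
        return (t1 < t2) | ((t1 > t2) << 1);
}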
index c4b8093c80b3bad9b6f9cd56a086df5a4c159508..f1604d8cf489a2e0cc689ab82ce6c3adfb82f693 100644 (file)
@@ -725,14 +725,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
        clear_bit(bit, addr);
 }
 
-static void memory_bm_clear_current(struct memory_bitmap *bm)
-{
-       int bit;
-
-       bit = max(bm->cur.node_bit - 1, 0);
-       clear_bit(bit, bm->cur.node->data);
-}
-
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
        void *addr;
@@ -1341,35 +1333,23 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-       unsigned long fb_pfn, fr_pfn;
-
-       memory_bm_position_reset(forbidden_pages_map);
-       memory_bm_position_reset(free_pages_map);
-
-loop:
-       fr_pfn = memory_bm_next_pfn(free_pages_map);
-       fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-
-       /*
-        * Find the next bit set in both bitmaps. This is guaranteed to
-        * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
-        */
-       do {
-               if (fb_pfn < fr_pfn)
-                       fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-               if (fr_pfn < fb_pfn)
-                       fr_pfn = memory_bm_next_pfn(free_pages_map);
-       } while (fb_pfn != fr_pfn);
-
-       if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
-               struct page *page = pfn_to_page(fr_pfn);
+       struct zone *zone;
+       unsigned long pfn, max_zone_pfn;
 
-               memory_bm_clear_current(forbidden_pages_map);
-               memory_bm_clear_current(free_pages_map);
-               __free_page(page);
-               goto loop;
+       for_each_populated_zone(zone) {
+               max_zone_pfn = zone_end_pfn(zone);
+               for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+                       if (pfn_valid(pfn)) {
+                               struct page *page = pfn_to_page(pfn);
+
+                               if (swsusp_page_is_forbidden(page) &&
+                                   swsusp_page_is_free(page)) {
+                                       swsusp_unset_page_forbidden(page);
+                                       swsusp_unset_page_free(page);
+                                       __free_page(page);
+                               }
+                       }
        }
-
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
index e04c455a0e3860b2aa5784d4c2522a21300525e4..1ce770687ea83c6a05c562591fe475b5f2cc52d0 100644 (file)
@@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level,
        raw_spin_lock(&logbuf_lock);
        logbuf_cpu = this_cpu;
 
-       if (recursion_bug) {
+       if (unlikely(recursion_bug)) {
                static const char recursion_msg[] =
                        "BUG: recent printk recursion!";
 
                recursion_bug = 0;
-               text_len = strlen(recursion_msg);
                /* emit KERN_CRIT message */
                printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-                                        NULL, 0, recursion_msg, text_len);
+                                        NULL, 0, recursion_msg,
+                                        strlen(recursion_msg));
        }
 
        /*
index 4aec4a457431ed50ab459e81ba622674410bff47..a7077d3ae52fe2e9bcd43bf076d748eb5bf6ef2a 100644 (file)
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
                                                        ktime_t now)
 {
+       unsigned long flags;
        struct k_itimer *ptr = container_of(alarm, struct k_itimer,
                                                it.alarm.alarmtimer);
-       if (posix_timer_event(ptr, 0) != 0)
-               ptr->it_overrun++;
+       enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+       spin_lock_irqsave(&ptr->it_lock, flags);
+       if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+               if (posix_timer_event(ptr, 0) != 0)
+                       ptr->it_overrun++;
+       }
 
        /* Re-add periodic timers */
        if (ptr->it.alarm.interval.tv64) {
                ptr->it_overrun += alarm_forward(alarm, now,
                                                ptr->it.alarm.interval);
-               return ALARMTIMER_RESTART;
+               result = ALARMTIMER_RESTART;
        }
-       return ALARMTIMER_NORESTART;
+       spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+       return result;
 }
 
 /**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
  * @new_timer: k_itimer pointer
  * @cur_setting: itimerspec data to fill
  *
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
  */
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
-       memset(cur_setting, 0, sizeof(struct itimerspec));
+       ktime_t relative_expiry_time =
+               alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+       if (ktime_to_ns(relative_expiry_time) > 0) {
+               cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+       } else {
+               cur_setting->it_value.tv_sec = 0;
+               cur_setting->it_value.tv_nsec = 0;
+       }
 
-       cur_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarm.interval);
-       cur_setting->it_value =
-               ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-       return;
+       cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
 }
 
 /**
index f0294ba14634345b604ebf88ca5b5977fadfad17..a9ae20fb0b11ca879220b7c80467b7485fb3914f 100644 (file)
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-       unsigned long sec = value->tv_sec;
-       long nsec = value->tv_nsec + TICK_NSEC - 1;
+       nsec = nsec + TICK_NSEC - 1;
 
        if (sec >= MAX_SEC_IN_JIFFIES){
                sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
- *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
  */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-       unsigned long sec = value->tv_sec;
-       long usec = value->tv_usec;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               usec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+       return __timespec_to_jiffies(value->tv_sec,
+                                    value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
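
After this change both conversions share __timespec_to_jiffies(); a timeval is simply scaled from microseconds to nanoseconds first. A small sketch of the equivalence with a hypothetical example_* function:

#include <linux/time.h>
#include <linux/jiffies.h>

static unsigned long example_convert(void)
{
        struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };

        /* Equivalent to __timespec_to_jiffies(1, 500000 * NSEC_PER_USEC),
         * i.e. 1.5 s rounded up to the next whole jiffy. */
        return timeval_to_jiffies(&tv);
}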
 
index b38fb2b9e23772d3f3360ef556cf0714d4a44803..2d75c94ae87d871bbf42db7b1ee949463bb7f65f 100644 (file)
@@ -3359,7 +3359,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
        iter->head = cpu_buffer->reader_page->read;
 
        iter->cache_reader_page = iter->head_page;
-       iter->cache_read = iter->head;
+       iter->cache_read = cpu_buffer->read;
 
        if (iter->head)
                iter->read_stamp = cpu_buffer->read_stamp;
index a5ce0c7f6c302058da57fe19bda51b9286cdd883..54cf309a92a5ed7c8d4af783bd8811d0b706dfd6 100644 (file)
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
 config ARCH_USE_CMPXCHG_LOCKREF
        bool
 
+config ARCH_HAS_FAST_MULTIPLIER
+       bool
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index ae146f0734eb59adcd2148af99f5ed89d013bd93..2404d03e251a64ae7634d30c46aa22a08f9b4538 100644 (file)
@@ -1723,11 +1723,13 @@ ascend_old_tree:
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                slot = shortcut->parent_slot;
                cursor = shortcut->back_pointer;
+               if (!cursor)
+                       goto gc_complete;
        } else {
                slot = node->parent_slot;
                cursor = ptr;
        }
-       BUG_ON(!ptr);
+       BUG_ON(!cursor);
        node = assoc_array_ptr_to_node(cursor);
        slot++;
        goto continue_node;
index bdb9a456bcbb50471310b9f636df72c6bc377ddd..38d2db82228c2ce716a68d266efe772e1bc6bc5d 100644 (file)
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        if (!np_pool)
                return NULL;
        pdev = of_find_device_by_node(np_pool);
+       of_node_put(np_pool);
        if (!pdev)
                return NULL;
        return dev_get_gen_pool(&pdev->dev);
index b7d81ba143d13e01db3d6b92ea171c094bb7a1c6..9a5c1f2215585f35a8eea43eedd2566a6f459231 100644 (file)
@@ -11,7 +11,7 @@
 
 unsigned int __sw_hweight32(unsigned int w)
 {
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
        return __sw_hweight32((unsigned int)(w >> 32)) +
               __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x5555555555555555ul;
        w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
        w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
index fe5a3342e9607d8ee9bd11678ef31d070a81ccf5..a89cf09a82684d729222699afb1f911162977ba7 100644 (file)
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall.  Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue().  Will be removed during v3.18
+ * devel cycle.  Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+       WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+                 "percpu_ref_kill() called more than once on %pf!",
+                 ref->release);
+
+       ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+       synchronize_sched_expedited();
+       percpu_ref_kill_rcu(&ref->rcu);
+}
index a2c78810ebc1a64a95903645dec6d14eed6ba1b6..16d02639d3346c4924caa5b0257c500e66289064 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/hash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
-#include <linux/log2.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4UL
@@ -589,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * rhashtable_destroy - destroy hash table
  * @ht:                the hash table to destroy
  *
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
  */
 void rhashtable_destroy(const struct rhashtable *ht)
 {
-       const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
-       bucket_table_free(tbl);
+       bucket_table_free(ht->tbl);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
index 992bf30af7595f2cbc9b385c24a06e6990dd2c49..f3c6ff596414e6f9bd1cf58071fadfd4c7a31b35 100644 (file)
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
                return check_bytes8(start, value, bytes);
 
        value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
 #else
index 306baa594f95cd8ed8e5c945e4cc46e4f19c1f93..ba8019b063e18ecb14da4cf3a64dfa671586f8e0 100644 (file)
@@ -176,7 +176,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
        if (list_empty(&dev->dma_pools) &&
            device_create_file(dev, &dev_attr_pools)) {
                kfree(retval);
-               return NULL;
+               retval = NULL;
        } else
                list_add(&retval->pools, &dev->dma_pools);
        mutex_unlock(&pools_lock);
index d9a21d06b8623571cabe5f73532d02412a94ae9c..f8ffd9412ec502d10f2c103554ffe333306e2e3f 100644 (file)
@@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page,
                for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                        pte_t *pte, entry;
                        BUG_ON(PageCompound(page+i));
+                       /*
+                        * Note that pmd_numa is not transferred deliberately
+                        * to avoid any possibility that pte_numa leaks to
+                        * a PROT_NONE VMA by accident.
+                        */
                        entry = mk_pte(page + i, vma->vm_page_prot);
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                        if (!pmd_write(*pmd))
                                entry = pte_wrprotect(entry);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
-                       if (pmd_numa(*pmd))
-                               entry = pte_mknuma(entry);
                        pte = pte_offset_map(&_pmd, haddr);
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, haddr, pte, entry);
index ab88dc0ea1d36a3a7e971f096e8e10c4a1646c93..9a09f2034fcc53a2733d931f33e82d7c3d79fa97 100644 (file)
@@ -310,7 +310,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 EXPORT_SYMBOL(iov_iter_init);
 
 static ssize_t get_pages_iovec(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        size_t offset = i->iov_offset;
@@ -323,6 +323,8 @@ static ssize_t get_pages_iovec(struct iov_iter *i,
        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
+       if (len > maxsize)
+               len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        if (len > maxpages * PAGE_SIZE)
@@ -588,13 +590,15 @@ static unsigned long alignment_bvec(const struct iov_iter *i)
 }
 
 static ssize_t get_pages_bvec(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
+       if (len > maxsize)
+               len = maxsize;
        /* can't be more than PAGE_SIZE */
        *start = bvec->bv_offset + i->iov_offset;
 
@@ -711,13 +715,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 EXPORT_SYMBOL(iov_iter_alignment);
 
 ssize_t iov_iter_get_pages(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        if (i->type & ITER_BVEC)
-               return get_pages_bvec(i, pages, maxpages, start);
+               return get_pages_bvec(i, pages, maxsize, maxpages, start);
        else
-               return get_pages_iovec(i, pages, maxpages, start);
+               return get_pages_iovec(i, pages, maxsize, maxpages, start);
 }
 EXPORT_SYMBOL(iov_iter_get_pages);
 
index 70fad0c0dafb60c8f64d919b607bfb22e568afba..6ecb0d937fb5f9bde4015d20b620484f5c965f84 100644 (file)
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;
 
+               /* skip hotpluggable memory regions if needed */
+               if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+                       continue;
+
                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
index 085dc6d2f876374c07a518866ecb1d25a2e8c419..28928ce9b07fbed2529ebdcd708d2aa3345410a4 100644 (file)
@@ -292,6 +292,9 @@ struct mem_cgroup {
        /* vmpressure notifications */
        struct vmpressure vmpressure;
 
+       /* css_online() has been completed */
+       int initialized;
+
        /*
         * the counter to account for mem+swap usage.
         */
@@ -1099,10 +1102,21 @@ skip_node:
         * skipping css reference should be safe.
         */
        if (next_css) {
-               if ((next_css == &root->css) ||
-                   ((next_css->flags & CSS_ONLINE) &&
-                    css_tryget_online(next_css)))
-                       return mem_cgroup_from_css(next_css);
+               struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
+
+               if (next_css == &root->css)
+                       return memcg;
+
+               if (css_tryget_online(next_css)) {
+                       /*
+                        * Make sure the memcg is initialized:
+                        * mem_cgroup_css_online() orders the
+                        * initialization against setting the flag.
+                        */
+                       if (smp_load_acquire(&memcg->initialized))
+                               return memcg;
+                       css_put(next_css);
+               }
 
                prev_css = next_css;
                goto skip_node;
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
+       int ret;
 
        if (css->id > MEM_CGROUP_ID_MAX)
                return -ENOSPC;
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
        }
        mutex_unlock(&memcg_create_mutex);
 
-       return memcg_init_kmem(memcg, &memory_cgrp_subsys);
+       ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure the memcg is initialized: mem_cgroup_iter()
+        * orders reading memcg->initialized against its callers
+        * reading the memcg members.
+        */
+       smp_store_release(&memcg->initialized, 1);
+
+       return 0;
 }
 
 /*
index adeac306610f7d7895e9d361d11649fb31f0a914..e229970e4223ff02cb24b43b9e47e5ee211555d6 100644 (file)
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps);
 unsigned long zero_pfn __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 
+EXPORT_SYMBOL(zero_pfn);
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -1125,7 +1127,7 @@ again:
                                                addr) != page->index) {
                                pte_t ptfile = pgoff_to_pte(page->index);
                                if (pte_soft_dirty(ptent))
-                                       pte_file_mksoft_dirty(ptfile);
+                                       ptfile = pte_file_mksoft_dirty(ptfile);
                                set_pte_at(mm, addr, pte, ptfile);
                        }
                        if (PageAnon(page))
index f78ec9bd454dd04585d208bd065460c92f04c042..2740360cd216ca45054387f481c7c31775d4ebff 100644 (file)
@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (pte_swp_soft_dirty(*ptep))
                pte = pte_mksoft_dirty(pte);
+
+       /* Recheck VMA as permissions can change since migration started  */
        if (is_write_migration_entry(entry))
-               pte = pte_mkwrite(pte);
+               pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (PageHuge(new)) {
                pte = pte_mkhuge(pte);
index c1f2ea4a0b9960d39940c82f4ba329dad2df74df..c0a3637cdb645b0eed66c18edcd10431bfd5417b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
-                       pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
-                       pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+                       pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
-                       pr_info("vm_end %lx < vm_start %lx\n",
+                       pr_emerg("vm_end %lx < vm_start %lx\n",
                                vma->vm_end, vma->vm_start);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-                       pr_info("free gap %lx, correct %lx\n",
+                       pr_emerg("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
        for (nd = pn; nd; nd = rb_prev(nd))
                j++;
        if (i != j) {
-               pr_info("backwards %d, forwards %d\n", j, i);
+               pr_emerg("backwards %d, forwards %d\n", j, i);
                bug = 1;
        }
        return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
                i++;
        }
        if (i != mm->map_count) {
-               pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+               pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
                bug = 1;
        }
        if (highest_address != mm->highest_vm_end) {
-               pr_info("mm->highest_vm_end %lx, found %lx\n",
+               pr_emerg("mm->highest_vm_end %lx, found %lx\n",
                       mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
-               pr_info("map_count %d rb %d\n", mm->map_count, i);
+               pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
        BUG_ON(bug);
index 7ed58602e71b186c46db4bf8857558f6e08ac60a..7c7ab32ee5032dad07354f438b5832649aaa044b 100644 (file)
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
        phys_addr_t start, end;
        u64 i;
 
+       memblock_clear_hotplug(0, -1);
+
        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                count += __free_memory_core(start, end);
 
index 18cee0d4c8a20705a4b3e7dd73e7d1d8a8b8d595..eee961958021e8c31e02ac6845b290a21d5a1690 100644 (file)
@@ -1612,7 +1612,7 @@ again:
        }
 
        __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-       if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
+       if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
            !zone_is_fair_depleted(zone))
                zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
                __mod_zone_page_state(zone, NR_ALLOC_BATCH,
-                                     high_wmark_pages(zone) -
-                                     low_wmark_pages(zone) -
-                                     zone_page_state(zone, NR_ALLOC_BATCH));
+                       high_wmark_pages(zone) - low_wmark_pages(zone) -
+                       atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
index 3707c71ae4cddbec027eac857291185c662c760a..51108165f829d777e4c69abe6109cb88ca4d7e14 100644 (file)
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            int page_start, int page_end)
 {
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-       unsigned int cpu;
+       unsigned int cpu, tcpu;
        int i;
 
        for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-                       if (!*pagep) {
-                               pcpu_free_pages(chunk, pages, populated,
-                                               page_start, page_end);
-                               return -ENOMEM;
-                       }
+                       if (!*pagep)
+                               goto err;
                }
        }
        return 0;
+
+err:
+       while (--i >= page_start)
+               __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+       for_each_possible_cpu(tcpu) {
+               if (tcpu == cpu)
+                       break;
+               for (i = page_start; i < page_end; i++)
+                       __free_page(pages[pcpu_page_idx(tcpu, i)]);
+       }
+       return -ENOMEM;
 }
 
 /**
@@ -263,6 +272,7 @@ err:
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
+       pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
 }
 
index 2139e30a4b4490da90c2e2811d5ecc901ff07d61..da997f9800bdeab14b47b694b09593de89db4a17 100644 (file)
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
 
        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
+
+       pcpu_free_alloc_info(ai);
 }
 
 #endif /* CONFIG_SMP */
index 0e5fb225007c519a27b680673160011bd74dd445..469f90d56051984c674f395042029ea737608d5b 100644 (file)
@@ -2367,8 +2367,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
 
        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
-               if (they_are_dirs)
+               if (they_are_dirs) {
+                       drop_nlink(new_dentry->d_inode);
                        drop_nlink(old_dir);
+               }
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
index a467b308c682334254c53fdf12a6114aad1ce1fd..7c52b3890d25378198341101242c671e585649bf 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-       size_t left_over, freelist_size, ralign;
+       size_t left_over, freelist_size;
+       size_t ralign = BYTES_PER_WORD;
        gfp_t gfp;
        int err;
        size_t size = cachep->size;
@@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                size &= ~(BYTES_PER_WORD - 1);
        }
 
-       /*
-        * Redzoning and user store require word alignment or possibly larger.
-        * Note this will be overridden by architecture or caller mandated
-        * alignment if either is greater than BYTES_PER_WORD.
-        */
-       if (flags & SLAB_STORE_USER)
-               ralign = BYTES_PER_WORD;
-
        if (flags & SLAB_RED_ZONE) {
                ralign = REDZONE_ALIGN;
                /* If redzoning, ensure that the second redzone is suitably
@@ -2994,7 +2987,7 @@ out:
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3226,7 +3219,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
        void *objp;
 
-       if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+       if (current->mempolicy || cpuset_do_slab_mem_spread()) {
                objp = alternate_node_alloc(cache, flags);
                if (objp)
                        goto out;
index b50dabb3f86ab49667cb939af29cef53a03b2ac2..faff6247ac8fb8769bf58f235fd92c0deab6aaf1 100644 (file)
@@ -589,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route);
 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
        struct hci_dev *hdev = conn->hdev;
+       struct hci_conn_params *params;
+
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+                                          conn->dst_type);
+       if (params && params->conn) {
+               hci_conn_drop(params->conn);
+               params->conn = NULL;
+       }
 
        conn->state = BT_CLOSED;
 
index c32d361c0cf766478f80fb9ed8a4884f53451122..1d9c29a00568d9b6ac3eea93848ec6637e5a5a5d 100644 (file)
@@ -2536,8 +2536,13 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
 {
        struct hci_conn_params *p;
 
-       list_for_each_entry(p, &hdev->le_conn_params, list)
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               if (p->conn) {
+                       hci_conn_drop(p->conn);
+                       p->conn = NULL;
+               }
                list_del_init(&p->action);
+       }
 
        BT_DBG("All LE pending actions cleared");
 }
@@ -2578,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
-       hci_conn_hash_flush(hdev);
        hci_pend_le_actions_clear(hdev);
+       hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
        hci_notify(hdev, HCI_DEV_DOWN);
@@ -3727,6 +3732,9 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
        if (!params)
                return;
 
+       if (params->conn)
+               hci_conn_drop(params->conn);
+
        list_del(&params->action);
        list_del(&params->list);
        kfree(params);
@@ -3757,6 +3765,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
        struct hci_conn_params *params, *tmp;
 
        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+               if (params->conn)
+                       hci_conn_drop(params->conn);
                list_del(&params->action);
                list_del(&params->list);
                kfree(params);
index be35598984d9b9b3120cd178f67f0418858b2774..a6000823f0ff34d938631bed2e39a6b17a26ac13 100644 (file)
@@ -4221,8 +4221,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_proto_connect_cfm(conn, ev->status);
 
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
-       if (params)
+       if (params) {
                list_del_init(&params->action);
+               if (params->conn) {
+                       hci_conn_drop(params->conn);
+                       params->conn = NULL;
+               }
+       }
 
 unlock:
        hci_update_background_scan(hdev);
@@ -4304,8 +4309,16 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
 
        conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
                              HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
-       if (!IS_ERR(conn))
+       if (!IS_ERR(conn)) {
+               /* Store the pointer since we don't really have any
+                * other owner of the object besides the params that
+                * triggered it. This way we can abort the connection if
+                * the parameters get removed and keep the reference
+                * count consistent once the connection is established.
+                */
+               params->conn = conn;
                return;
+       }
 
        switch (PTR_ERR(conn)) {
        case -EBUSY:
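
These Bluetooth hunks make the stored connection parameters take a reference on the hci_conn they triggered (params->conn), so every path that removes the parameters can drop that reference, and an in-progress connection can be aborted, without leaking it. A minimal get/put refcounting sketch of that ownership rule, with invented names rather than the HCI API:

    #include <stdio.h>
    #include <stdlib.h>

    struct conn {
        int refcnt;
    };

    static struct conn *conn_get(struct conn *c) { c->refcnt++; return c; }

    static void conn_put(struct conn *c)
    {
        if (--c->refcnt == 0) {
            printf("connection torn down\n");
            free(c);
        }
    }

    /* Parameters that triggered a connection keep their own reference to it. */
    struct conn_params {
        struct conn *conn;
    };

    static void params_attach(struct conn_params *p, struct conn *c)
    {
        p->conn = conn_get(c);
    }

    /* Every removal path must drop the reference exactly once. */
    static void params_remove(struct conn_params *p)
    {
        if (p->conn) {
            conn_put(p->conn);
            p->conn = NULL;
        }
    }

    int main(void)
    {
        struct conn *c = calloc(1, sizeof(*c));
        struct conn_params p = { 0 };

        c->refcnt = 1;            /* creator's reference            */
        params_attach(&p, c);     /* params now co-own the object   */

        params_remove(&p);        /* any of the removal paths above */
        conn_put(c);              /* creator drops its reference    */
        return 0;
    }
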
index 62a7fa2e356921d27729d365627d490dbf8a6126..b6c04cbcfdc5ee4aa8200677e50107014e0ce2cb 100644 (file)
@@ -309,6 +309,9 @@ struct br_input_skb_cb {
        int igmp;
        int mrouters_only;
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       bool vlan_filtered;
+#endif
 };
 
 #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
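
The new vlan_filtered flag lives in the per-packet control block, so the decision taken at ingress travels with the skb; as the bridge VLAN hunks that follow show, egress then tests that flag instead of re-reading br->vlan_enabled, which may have been toggled in between. A small sketch of recording a decision in per-packet state rather than consulting a global twice, invented types only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Global toggle that may flip between ingress and egress processing. */
    static bool vlan_filtering_enabled = true;

    struct packet {
        int vid;
        bool vlan_filtered;   /* set at ingress, consulted at egress */
    };

    static bool ingress(struct packet *pkt)
    {
        if (!vlan_filtering_enabled) {
            pkt->vlan_filtered = false;   /* remember: this packet skipped filtering */
            return true;
        }
        pkt->vlan_filtered = true;
        return pkt->vid != 0;             /* pretend VID 0 is not permitted */
    }

    static bool egress(const struct packet *pkt)
    {
        /* Decide based on what actually happened at ingress, not on the
         * current value of the global toggle. */
        if (!pkt->vlan_filtered)
            return true;
        return pkt->vid != 0;
    }

    int main(void)
    {
        struct packet pkt = { .vid = 10 };

        if (!ingress(&pkt))
            return 0;
        vlan_filtering_enabled = false;   /* toggle flips mid-flight */
        printf("egress verdict: %s\n", egress(&pkt) ? "pass" : "drop");
        return 0;
    }
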
index e1bcd653899b4ed0a7ca8714817f9a9350eb414d..3ba57fcdcd13fec638aa17e539e4baf0d3e010eb 100644 (file)
@@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
 {
        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(v, vid);
+       else
+               __vlan_delete_pvid(v, vid);
 
        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
                set_bit(vid, v->untagged_bitmap);
+       else
+               clear_bit(vid, v->untagged_bitmap);
 }
 
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
@@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                goto out;
 
        /* Vlan filter table must be configured at this point.  The
@@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
-       if (!br->vlan_enabled)
+       if (!br->vlan_enabled) {
+               BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
                return true;
+       }
 
        /* If there are no vlan in the permitted list, all packets are
         * rejected.
@@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                goto drop;
 
+       BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        proto = br->vlan_proto;
 
        /* If vlan tx offload is disabled on bridge device and frame was
@@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                return true;
 
        if (!v)
@@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
        struct net_bridge *br = p->br;
        struct net_port_vlans *v;
 
+       /* If filtering was disabled at input, let it pass. */
        if (!br->vlan_enabled)
                return true;
 
index 96238ba95f2b6954c598294243b8f65931a82284..de6662b14e1f5d7110ac1316c1362e4db6bffc5f 100644 (file)
@@ -13,8 +13,6 @@
 #include "auth_x.h"
 #include "auth_x_protocol.h"
 
-#define TEMP_TICKET_BUF_LEN    256
-
 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
 
 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
 }
 
 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-                         void **p, void *end, void *obuf, size_t olen)
+                         void **p, void *end, void **obuf, size_t olen)
 {
        struct ceph_x_encrypt_header head;
        size_t head_len = sizeof(head);
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
                return -EINVAL;
 
        dout("ceph_x_decrypt len %d\n", len);
-       ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
-                           *p, len);
+       if (*obuf == NULL) {
+               *obuf = kmalloc(len, GFP_NOFS);
+               if (!*obuf)
+                       return -ENOMEM;
+               olen = len;
+       }
+
+       ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
        if (ret)
                return ret;
        if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
        kfree(th);
 }
 
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
-                                   struct ceph_crypto_key *secret,
-                                   void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+                             struct ceph_crypto_key *secret,
+                             void **p, void *end)
 {
        struct ceph_x_info *xi = ac->private;
-       int num;
-       void *p = buf;
+       int type;
+       u8 tkt_struct_v, blob_struct_v;
+       struct ceph_x_ticket_handler *th;
+       void *dbuf = NULL;
+       void *dp, *dend;
+       int dlen;
+       char is_enc;
+       struct timespec validity;
+       struct ceph_crypto_key old_key;
+       void *ticket_buf = NULL;
+       void *tp, *tpend;
+       struct ceph_timespec new_validity;
+       struct ceph_crypto_key new_session_key;
+       struct ceph_buffer *new_ticket_blob;
+       unsigned long new_expires, new_renew_after;
+       u64 new_secret_id;
        int ret;
-       char *dbuf;
-       char *ticket_buf;
-       u8 reply_struct_v;
 
-       dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!dbuf)
-               return -ENOMEM;
+       ceph_decode_need(p, end, sizeof(u32) + 1, bad);
 
-       ret = -ENOMEM;
-       ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!ticket_buf)
-               goto out_dbuf;
+       type = ceph_decode_32(p);
+       dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
 
-       ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-       reply_struct_v = ceph_decode_8(&p);
-       if (reply_struct_v != 1)
+       tkt_struct_v = ceph_decode_8(p);
+       if (tkt_struct_v != 1)
                goto bad;
-       num = ceph_decode_32(&p);
-       dout("%d tickets\n", num);
-       while (num--) {
-               int type;
-               u8 tkt_struct_v, blob_struct_v;
-               struct ceph_x_ticket_handler *th;
-               void *dp, *dend;
-               int dlen;
-               char is_enc;
-               struct timespec validity;
-               struct ceph_crypto_key old_key;
-               void *tp, *tpend;
-               struct ceph_timespec new_validity;
-               struct ceph_crypto_key new_session_key;
-               struct ceph_buffer *new_ticket_blob;
-               unsigned long new_expires, new_renew_after;
-               u64 new_secret_id;
-
-               ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
-               type = ceph_decode_32(&p);
-               dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
-               tkt_struct_v = ceph_decode_8(&p);
-               if (tkt_struct_v != 1)
-                       goto bad;
-
-               th = get_ticket_handler(ac, type);
-               if (IS_ERR(th)) {
-                       ret = PTR_ERR(th);
-                       goto out;
-               }
 
-               /* blob for me */
-               dlen = ceph_x_decrypt(secret, &p, end, dbuf,
-                                     TEMP_TICKET_BUF_LEN);
-               if (dlen <= 0) {
-                       ret = dlen;
-                       goto out;
-               }
-               dout(" decrypted %d bytes\n", dlen);
-               dend = dbuf + dlen;
-               dp = dbuf;
+       th = get_ticket_handler(ac, type);
+       if (IS_ERR(th)) {
+               ret = PTR_ERR(th);
+               goto out;
+       }
 
-               tkt_struct_v = ceph_decode_8(&dp);
-               if (tkt_struct_v != 1)
-                       goto bad;
+       /* blob for me */
+       dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+       if (dlen <= 0) {
+               ret = dlen;
+               goto out;
+       }
+       dout(" decrypted %d bytes\n", dlen);
+       dp = dbuf;
+       dend = dp + dlen;
 
-               memcpy(&old_key, &th->session_key, sizeof(old_key));
-               ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
-               if (ret)
-                       goto out;
+       tkt_struct_v = ceph_decode_8(&dp);
+       if (tkt_struct_v != 1)
+               goto bad;
 
-               ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-               ceph_decode_timespec(&validity, &new_validity);
-               new_expires = get_seconds() + validity.tv_sec;
-               new_renew_after = new_expires - (validity.tv_sec / 4);
-               dout(" expires=%lu renew_after=%lu\n", new_expires,
-                    new_renew_after);
+       memcpy(&old_key, &th->session_key, sizeof(old_key));
+       ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+       if (ret)
+               goto out;
 
-               /* ticket blob for service */
-               ceph_decode_8_safe(&p, end, is_enc, bad);
-               tp = ticket_buf;
-               if (is_enc) {
-                       /* encrypted */
-                       dout(" encrypted ticket\n");
-                       dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
-                                             TEMP_TICKET_BUF_LEN);
-                       if (dlen < 0) {
-                               ret = dlen;
-                               goto out;
-                       }
-                       dlen = ceph_decode_32(&tp);
-               } else {
-                       /* unencrypted */
-                       ceph_decode_32_safe(&p, end, dlen, bad);
-                       ceph_decode_need(&p, end, dlen, bad);
-                       ceph_decode_copy(&p, ticket_buf, dlen);
+       ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+       ceph_decode_timespec(&validity, &new_validity);
+       new_expires = get_seconds() + validity.tv_sec;
+       new_renew_after = new_expires - (validity.tv_sec / 4);
+       dout(" expires=%lu renew_after=%lu\n", new_expires,
+            new_renew_after);
+
+       /* ticket blob for service */
+       ceph_decode_8_safe(p, end, is_enc, bad);
+       if (is_enc) {
+               /* encrypted */
+               dout(" encrypted ticket\n");
+               dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+               if (dlen < 0) {
+                       ret = dlen;
+                       goto out;
                }
-               tpend = tp + dlen;
-               dout(" ticket blob is %d bytes\n", dlen);
-               ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-               blob_struct_v = ceph_decode_8(&tp);
-               new_secret_id = ceph_decode_64(&tp);
-               ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
-               if (ret)
+               tp = ticket_buf;
+               dlen = ceph_decode_32(&tp);
+       } else {
+               /* unencrypted */
+               ceph_decode_32_safe(p, end, dlen, bad);
+               ticket_buf = kmalloc(dlen, GFP_NOFS);
+               if (!ticket_buf) {
+                       ret = -ENOMEM;
                        goto out;
-
-               /* all is well, update our ticket */
-               ceph_crypto_key_destroy(&th->session_key);
-               if (th->ticket_blob)
-                       ceph_buffer_put(th->ticket_blob);
-               th->session_key = new_session_key;
-               th->ticket_blob = new_ticket_blob;
-               th->validity = new_validity;
-               th->secret_id = new_secret_id;
-               th->expires = new_expires;
-               th->renew_after = new_renew_after;
-               dout(" got ticket service %d (%s) secret_id %lld len %d\n",
-                    type, ceph_entity_type_name(type), th->secret_id,
-                    (int)th->ticket_blob->vec.iov_len);
-               xi->have_keys |= th->service;
+               }
+               tp = ticket_buf;
+               ceph_decode_need(p, end, dlen, bad);
+               ceph_decode_copy(p, ticket_buf, dlen);
        }
+       tpend = tp + dlen;
+       dout(" ticket blob is %d bytes\n", dlen);
+       ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+       blob_struct_v = ceph_decode_8(&tp);
+       new_secret_id = ceph_decode_64(&tp);
+       ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+       if (ret)
+               goto out;
+
+       /* all is well, update our ticket */
+       ceph_crypto_key_destroy(&th->session_key);
+       if (th->ticket_blob)
+               ceph_buffer_put(th->ticket_blob);
+       th->session_key = new_session_key;
+       th->ticket_blob = new_ticket_blob;
+       th->validity = new_validity;
+       th->secret_id = new_secret_id;
+       th->expires = new_expires;
+       th->renew_after = new_renew_after;
+       dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+            type, ceph_entity_type_name(type), th->secret_id,
+            (int)th->ticket_blob->vec.iov_len);
+       xi->have_keys |= th->service;
 
-       ret = 0;
 out:
        kfree(ticket_buf);
-out_dbuf:
        kfree(dbuf);
        return ret;
 
@@ -270,6 +255,34 @@ bad:
        goto out;
 }
 
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+                                   struct ceph_crypto_key *secret,
+                                   void *buf, void *end)
+{
+       void *p = buf;
+       u8 reply_struct_v;
+       u32 num;
+       int ret;
+
+       ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+       if (reply_struct_v != 1)
+               return -EINVAL;
+
+       ceph_decode_32_safe(&p, end, num, bad);
+       dout("%d tickets\n", num);
+
+       while (num--) {
+               ret = process_one_ticket(ac, secret, &p, end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+
+bad:
+       return -EINVAL;
+}
+
 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
                                   struct ceph_x_ticket_handler *th,
                                   struct ceph_x_authorizer *au)
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
        int ret = 0;
        struct ceph_x_authorize_reply reply;
+       void *preply = &reply;
        void *p = au->reply_buf;
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
        if (IS_ERR(th))
                return PTR_ERR(th);
-       ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+       ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
        if (ret < 0)
                return ret;
        if (ret != sizeof(reply))
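
process_one_ticket() now lets ceph_x_decrypt() allocate its output buffer from the encoded length when the caller passes a NULL buffer, removing the fixed 256-byte scratch buffers and their silent size limit. A hedged userspace sketch of the allocate-from-wire-length idea, with a plain length-prefixed copy standing in for the real decryption:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Decode a length-prefixed blob.  If *obuf is NULL the callee allocates a
     * buffer of exactly the decoded length; the caller owns and frees it.
     * Returns the payload length, or -1 on error.
     */
    static int decode_blob(const uint8_t **p, const uint8_t *end,
                           void **obuf, size_t olen)
    {
        uint32_t len;

        if ((size_t)(end - *p) < sizeof(len))
            return -1;
        memcpy(&len, *p, sizeof(len));
        *p += sizeof(len);
        if ((size_t)(end - *p) < len)
            return -1;

        if (*obuf == NULL) {            /* callee-sized allocation */
            *obuf = malloc(len);
            if (!*obuf)
                return -1;
            olen = len;
        }
        if (olen < len)
            return -1;                  /* caller-provided buffer too small */

        memcpy(*obuf, *p, len);
        *p += len;
        return (int)len;
    }

    int main(void)
    {
        uint8_t wire[4 + 5];
        uint32_t len = 5;
        const uint8_t *p = wire, *end = wire + sizeof(wire);
        void *out = NULL;
        int n;

        memcpy(wire, &len, sizeof(len));   /* host-order length prefix */
        memcpy(wire + 4, "hello", 5);

        n = decode_blob(&p, end, &out, 0);
        if (n > 0)
            printf("decoded %d bytes: %.*s\n", n, n, (char *)out);
        free(out);
        return 0;
    }
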
index 067d3af2eaf61be41d601dc4324fc1a1565ab27d..61fcfc304f6869959b2ff510026ccb2e33700f21 100644 (file)
@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        if (!m) {
                pr_info("alloc_msg unknown type %d\n", type);
                *skip = 1;
+       } else if (front_len > m->front_alloc_len) {
+               pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+                          front_len, m->front_alloc_len,
+                          (unsigned int)con->peer_name.type,
+                          le64_to_cpu(con->peer_name.num));
+               ceph_msg_put(m);
+               m = ceph_msg_new(type, front_len, GFP_NOFS, false);
        }
+
        return m;
 }
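
mon_alloc_msg() keeps using its preallocated reply message unless the advertised front length is larger than what was preallocated, in which case it falls back to allocating a message of the right size (the kernel code also drops its reference on the too-small one). A tiny sketch of that prefer-the-prealloc policy with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg {
        size_t cap;
        char   data[];
    };

    static struct msg *msg_new(size_t cap)
    {
        struct msg *m = malloc(sizeof(*m) + cap);
        if (m)
            m->cap = cap;
        return m;
    }

    /* Return a message able to hold 'need' bytes, preferring 'prealloc'. */
    static struct msg *msg_for(struct msg *prealloc, size_t need)
    {
        if (need <= prealloc->cap)
            return prealloc;
        fprintf(stderr, "front %zu > prealloc %zu, allocating fresh\n",
                need, prealloc->cap);
        return msg_new(need);
    }

    int main(void)
    {
        struct msg *prealloc = msg_new(128);
        struct msg *m = msg_for(prealloc, 4096);   /* too big for the prealloc */

        if (m && m != prealloc)
            memset(m->data, 0, m->cap);
        if (m != prealloc)
            free(m);
        free(prealloc);
        return 0;
    }
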
 
index 488dd1a825c05b704a26d8c88582454f1745be4c..fdbc9a81d4c2033565c36815d425d0a7d80372b7 100644 (file)
@@ -775,7 +775,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
 EXPORT_SYMBOL(__skb_checksum_complete);
 
 /**
- *     skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
+ *     skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
  *     @skb: skbuff
  *     @hlen: hardware length
  *     @iov: io vector
index b65a5051361f2dea31a0fac078b3dd656e126cc8..cf8a95f48cff472d19d931506968a22b6612a12c 100644 (file)
@@ -2587,13 +2587,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
                return harmonize_features(skb, features);
        }
 
-       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
-                                              NETIF_F_HW_VLAN_STAG_TX);
+       features = netdev_intersect_features(features,
+                                            skb->dev->vlan_features |
+                                            NETIF_F_HW_VLAN_CTAG_TX |
+                                            NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
-               features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-                               NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
-                               NETIF_F_HW_VLAN_STAG_TX;
+               features = netdev_intersect_features(features,
+                                                    NETIF_F_SG |
+                                                    NETIF_F_HIGHDMA |
+                                                    NETIF_F_FRAGLIST |
+                                                    NETIF_F_GEN_CSUM |
+                                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                                    NETIF_F_HW_VLAN_STAG_TX);
 
        return harmonize_features(skb, features);
 }
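
The netif_skb_features() hunk replaces plain '&=' masking with netdev_intersect_features(), since a bare AND can only clear bits and cannot reconcile related capability bits between the two sets; the in-tree helper, for instance, lets a generic checksum bit stand in for the protocol-specific ones. A rough toy illustration of why the intersection helper differs from a plain AND (bit names invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy feature bits, loosely modelled on netdev_features_t. */
    #define F_SG       (1u << 0)
    #define F_IP_CSUM  (1u << 1)   /* can checksum IPv4 only */
    #define F_GEN_CSUM (1u << 2)   /* can checksum anything  */
    #define F_TSO      (1u << 3)

    /*
     * Intersect two capability sets.  A plain AND would drop checksumming
     * when one side advertises the generic bit and the other the specific
     * one, so treat "generic" as implying "specific" before masking.
     */
    static uint32_t intersect_features(uint32_t a, uint32_t b)
    {
        if (a & F_GEN_CSUM)
            a |= F_IP_CSUM;
        if (b & F_GEN_CSUM)
            b |= F_IP_CSUM;
        uint32_t r = a & b;
        /* Only keep the generic bit if both sides really have it. */
        if (!((a & b) & F_GEN_CSUM))
            r &= ~F_GEN_CSUM;
        return r;
    }

    int main(void)
    {
        uint32_t upper = F_SG | F_GEN_CSUM | F_TSO;  /* e.g. a vlan device */
        uint32_t lower = F_SG | F_IP_CSUM;           /* e.g. the real NIC  */

        printf("plain AND : %#x\n", upper & lower);              /* loses csum */
        printf("intersect : %#x\n", intersect_features(upper, lower));
        return 0;
    }
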
@@ -4803,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
        sysfs_remove_link(&(dev->dev.kobj), linkname);
 }
 
-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
-               (dev_list == &dev->adj_list.upper || \
-                dev_list == &dev->adj_list.lower)
+static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *dev_list)
+{
+       return (dev_list == &dev->adj_list.upper ||
+               dev_list == &dev->adj_list.lower) &&
+               net_eq(dev_net(dev), dev_net(adj_dev));
+}
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
@@ -4835,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        pr_debug("dev_hold for %s, because of link added from %s to %s\n",
                 adj_dev->name, dev->name, adj_dev->name);
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
                ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
                if (ret)
                        goto free_adj;
@@ -4856,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        return 0;
 
 remove_symlinks:
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 free_adj:
        kfree(adj);
@@ -4889,7 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 
        list_del_rcu(&adj->list);
@@ -5159,11 +5170,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void netdev_adjacent_add_links(struct net_device *dev)
+{
+       struct netdev_adjacent *iter;
+
+       struct net *net = dev_net(dev);
+
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.upper);
+       }
+
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.lower);
+       }
+}
+
+void netdev_adjacent_del_links(struct net_device *dev)
+{
+       struct netdev_adjacent *iter;
+
+       struct net *net = dev_net(dev);
+
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.upper);
+       }
+
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.lower);
+       }
+}
+
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
 {
        struct netdev_adjacent *iter;
 
+       struct net *net = dev_net(dev);
+
        list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.lower);
                netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -5171,6 +5236,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
        }
 
        list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.upper);
                netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -6773,6 +6840,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
+       netdev_adjacent_del_links(dev);
 
        /* Actually switch the network namespace */
        dev_net_set(dev, net);
@@ -6787,6 +6855,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+       netdev_adjacent_add_links(dev);
 
        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
index 6b5b6e7013cafec8a7b3767ed97a610bf35f1ae6..9d33dfffca19a992baf28e07ade0c7029c413ad0 100644 (file)
@@ -197,7 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  * as destination. A new timer with the interval specified in the
  * configuration TLV is created. Upon each interval, the latest statistics
  * will be read from &bstats and the estimated rate will be stored in
- * &rate_est with the statistics lock grabed during this period.
+ * &rate_est with the statistics lock grabbed during this period.
  *
  * Returns 0 on success or a negative error code.
  *
index 9d3d9e78397b0f90f379addab25c7fd78294b3bc..2ddbce4cce144f62e8fed9dd39209fb3429b82a7 100644 (file)
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(gnet_stats_copy_queue);
  * @st: application specific statistics data
  * @len: length of data
  *
- * Appends the application sepecific statistics to the top level TLV created by
+ * Appends the application specific statistics to the top level TLV created by
  * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
  * handle is in backward compatibility mode.
  *
index 163b673f9e62d212230abd1c9b848c35ba923a0d..8d289697cc7adcb2ce28c9e793e49c25fbbba5b5 100644 (file)
@@ -2647,7 +2647,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
  * skb_seq_read() will return the remaining part of the block.
  *
  * Note 1: The size of each block of data returned can be arbitrary,
- *       this limitation is the cost for zerocopy seqeuental
+ *       this limitation is the cost for zerocopy sequential
  *       reads of potentially non linear data.
  *
  * Note 2: Fragment lists within fragments are not implemented
@@ -2781,7 +2781,7 @@ EXPORT_SYMBOL(skb_find_text);
 /**
  * skb_append_datato_frags - append the user data to a skb
  * @sk: sock  structure
- * @skb: skb structure to be appened with user data.
+ * @skb: skb structure to be appended with user data.
  * @getfrag: call back function to be used for getting the user data
  * @from: pointer to user message iov
  * @length: length of the iov message
@@ -3152,6 +3152,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }
+       /* switch back to head shinfo */
+       pinfo = skb_shinfo(p);
+
        if (pinfo->frag_list)
                goto merge;
        if (skb_gro_len(p) != pinfo->gso_size)
index 2714811afbd8bd3d35b5bd8619179eafaa71e19b..9c3f823e76a909014fe3d2414c14570dd5917e78 100644 (file)
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(sk_ns_capable);
 /**
  * sk_capable - Socket global capability test
  * @sk: Socket to use a capability on or through
- * @cap: The global capbility to use
+ * @cap: The global capability to use
  *
  * Test to see if the opener of the socket had when the socket was
  * created and the current process has the capability @cap in all user
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(sk_capable);
  * @sk: Socket to use a capability on or through
  * @cap: The capability to use
  *
- * Test to see if the opener of the socket had when the socke was created
+ * Test to see if the opener of the socket had when the socket was created
  * and the current process has the capability @cap over the network namespace
  * the socket is a member of.
  */
@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                                           order);
                                        if (page)
                                                goto fill_page;
+                                       /* Do not retry other high order allocations */
+                                       order = 1;
+                                       max_page_order = 0;
                                }
                                order--;
                        }
@@ -1863,16 +1866,14 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * skb_page_frag_refill - check that a page_frag contains enough room
  * @sz: minimum size of the fragment we want to get
  * @pfrag: pointer to page_frag
- * @prio: priority for memory allocation
+ * @gfp: priority for memory allocation
  *
  * Note: While this allocator tries to use high order pages, there is
  * no guarantee that allocations succeed. Therefore, @sz MUST be
  * less than or equal to PAGE_SIZE.
  */
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 {
-       int order;
-
        if (pfrag->page) {
                if (atomic_read(&pfrag->page->_count) == 1) {
                        pfrag->offset = 0;
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
                put_page(pfrag->page);
        }
 
-       order = SKB_FRAG_PAGE_ORDER;
-       do {
-               gfp_t gfp = prio;
-
-               if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-               pfrag->page = alloc_pages(gfp, order);
+       pfrag->offset = 0;
+       if (SKB_FRAG_PAGE_ORDER) {
+               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+                                         __GFP_NOWARN | __GFP_NORETRY,
+                                         SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
-                       pfrag->offset = 0;
-                       pfrag->size = PAGE_SIZE << order;
+                       pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                        return true;
                }
-       } while (--order >= 0);
-
+       }
+       pfrag->page = alloc_page(gfp);
+       if (likely(pfrag->page)) {
+               pfrag->size = PAGE_SIZE;
+               return true;
+       }
        return false;
 }
 EXPORT_SYMBOL(skb_page_frag_refill);
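
The rewritten skb_page_frag_refill() makes exactly one opportunistic high-order attempt (with __GFP_NOWARN | __GFP_NORETRY) and, if that fails, falls straight back to a single page instead of walking every intermediate order. A userspace analogue of that try-big-once-then-settle strategy, with malloc standing in for the page allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE  4096u
    #define FRAG_ORDER 3                      /* preferred chunk: 8 pages */

    struct frag_cache {
        void   *buf;
        size_t  size;
        size_t  offset;
    };

    static int frag_refill(struct frag_cache *fc)
    {
        fc->offset = 0;

        /* One opportunistic attempt at the large size... */
        fc->buf = malloc(PAGE_SIZE << FRAG_ORDER);
        if (fc->buf) {
            fc->size = PAGE_SIZE << FRAG_ORDER;
            return 1;
        }
        /* ...then fall back straight to the minimum unit. */
        fc->buf = malloc(PAGE_SIZE);
        if (fc->buf) {
            fc->size = PAGE_SIZE;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct frag_cache fc = { 0 };

        if (!frag_refill(&fc)) {
            fprintf(stderr, "refill failed\n");
            return 1;
        }
        printf("refilled %zu bytes\n", fc.size);
        free(fc.buf);
        return 0;
    }
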
index 016b77ee88f0e2f1394c0456f67e9e01ec190d9f..6591d27e53a43413656a18a6eedb25d7bb5fe034 100644 (file)
@@ -246,7 +246,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
                        return ERR_PTR(-rc);
                }
        } else {
-               frag = ERR_PTR(ENOMEM);
+               frag = ERR_PTR(-ENOMEM);
        }
 
        return frag;
@@ -437,7 +437,7 @@ static void lowpan_setup(struct net_device *dev)
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
-       dev->mtu                = 1281;
+       dev->mtu                = IPV6_MIN_MTU;
        dev->tx_queue_len       = 0;
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
index ffec6ce510056960bb92d6ef0c3e44bc9493676f..32755cb7e64e3c02f77c85bdad6982d31311bcc5 100644 (file)
@@ -355,8 +355,6 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        struct net *net = dev_net(skb->dev);
        struct lowpan_frag_info *frag_info = lowpan_cb(skb);
        struct ieee802154_addr source, dest;
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
        int err;
 
        source = mac_cb(skb)->source;
@@ -366,8 +364,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        if (err < 0)
                goto err;
 
-       if (frag_info->d_size > ieee802154_lowpan->max_dsize)
+       if (frag_info->d_size > IPV6_MIN_MTU) {
+               net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
                goto err;
+       }
 
        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
@@ -415,13 +415,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       {
-               .procname       = "6lowpanfrag_max_datagram_size",
-               .data           = &init_net.ieee802154_lowpan.max_dsize,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        { }
 };
 
@@ -458,7 +451,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
                table[1].data = &ieee802154_lowpan->frags.low_thresh;
                table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
                table[2].data = &ieee802154_lowpan->frags.timeout;
-               table[3].data = &ieee802154_lowpan->max_dsize;
 
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
@@ -533,7 +525,6 @@ static int __net_init lowpan_frags_init_net(struct net *net)
        ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
-       ieee802154_lowpan->max_dsize = 0xFFFF;
 
        inet_frags_init_net(&ieee802154_lowpan->frags);
 
index afed1aac2638398c472de6504f7f4a43e5b2d502..bda4bb8ae2608e4543a505d8ae039fa301a8a9e6 100644 (file)
@@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
        idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t,
+static noinline void tunnel_dst_set(struct ip_tunnel *t,
                           struct dst_entry *dst, __be32 saddr)
 {
-       __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
+       __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
@@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
        struct dst_entry *dst;
 
        rcu_read_lock();
-       idst = this_cpu_ptr(t->dst_cache);
+       idst = raw_cpu_ptr(t->dst_cache);
        dst = rcu_dereference(idst->dst);
        if (dst && !atomic_inc_not_zero(&dst->__refcnt))
                dst = NULL;
@@ -764,9 +764,14 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 
-               if (!t && (cmd == SIOCADDTUNNEL)) {
-                       t = ip_tunnel_create(net, itn, p);
-                       err = PTR_ERR_OR_ZERO(t);
+               if (cmd == SIOCADDTUNNEL) {
+                       if (!t) {
+                               t = ip_tunnel_create(net, itn, p);
+                               err = PTR_ERR_OR_ZERO(t);
+                               break;
+                       }
+
+                       err = -EEXIST;
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
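
With this change SIOCADDTUNNEL fails with -EEXIST when a tunnel matching the request already exists, instead of quietly reusing the existing device as the result of an "add". A compact sketch of that create-must-create rule, with invented lookup/add helpers:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct tunnel { char name[16]; };

    static struct tunnel table[8];
    static int ntunnels;

    static struct tunnel *tunnel_find(const char *name)
    {
        for (int i = 0; i < ntunnels; i++)
            if (strcmp(table[i].name, name) == 0)
                return &table[i];
        return NULL;
    }

    /* "Add" must create something new; finding a match is an error. */
    static int tunnel_add(const char *name)
    {
        if (tunnel_find(name))
            return -EEXIST;
        if (ntunnels == 8)
            return -ENOSPC;
        snprintf(table[ntunnels++].name, sizeof(table[0].name), "%s", name);
        return 0;
    }

    int main(void)
    {
        printf("first add : %d\n", tunnel_add("gre1"));   /* 0       */
        printf("second add: %d\n", tunnel_add("gre1"));   /* -EEXIST */
        return 0;
    }
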
index fb173126f03dfb19ef5a215693a1985cc00450a2..7cbcaf4f0194f22b797f04b1fca1029a0a4e159e 100644 (file)
@@ -82,6 +82,52 @@ config NF_TABLES_ARP
        help
          This option enables the ARP support for nf_tables.
 
+config NF_NAT_IPV4
+       tristate "IPv4 NAT"
+       depends on NF_CONNTRACK_IPV4
+       default m if NETFILTER_ADVANCED=n
+       select NF_NAT
+       help
+         The IPv4 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. This can be
+         controlled by iptables or nft.
+
+if NF_NAT_IPV4
+
+config NF_NAT_SNMP_BASIC
+       tristate "Basic SNMP-ALG support"
+       depends on NF_CONNTRACK_SNMP
+       depends on NETFILTER_ADVANCED
+       default NF_NAT && NF_CONNTRACK_SNMP
+       ---help---
+
+         This module implements an Application Layer Gateway (ALG) for
+         SNMP payloads.  In conjunction with NAT, it allows a network
+         management system to access multiple private networks with
+         conflicting addresses.  It works by modifying IP addresses
+         inside SNMP payloads to match IP-layer NAT mapping.
+
+         This is the "basic" form of SNMP-ALG, as described in RFC 2962
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_NAT_PROTO_GRE
+       tristate
+       depends on NF_CT_PROTO_GRE
+
+config NF_NAT_PPTP
+       tristate
+       depends on NF_CONNTRACK
+       default NF_CONNTRACK_PPTP
+       select NF_NAT_PROTO_GRE
+
+config NF_NAT_H323
+       tristate
+       depends on NF_CONNTRACK
+       default NF_CONNTRACK_H323
+
+endif # NF_NAT_IPV4
+
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
@@ -170,19 +216,21 @@ config IP_NF_TARGET_SYNPROXY
          To compile it as a module, choose M here. If unsure, say N.
 
 # NAT + specific targets: nf_conntrack
-config NF_NAT_IPV4
-       tristate "IPv4 NAT"
+config IP_NF_NAT
+       tristate "iptables NAT support"
        depends on NF_CONNTRACK_IPV4
        default m if NETFILTER_ADVANCED=n
        select NF_NAT
+       select NF_NAT_IPV4
+       select NETFILTER_XT_NAT
        help
-         The IPv4 NAT option allows masquerading, port forwarding and other
-         forms of full Network Address Port Translation.  It is controlled by
-         the `nat' table in iptables: see the man page for iptables(8).
+         This enables the `nat' table in iptables. This allows masquerading,
+         port forwarding and other forms of full Network Address Port
+         Translation.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-if NF_NAT_IPV4
+if IP_NF_NAT
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
@@ -214,47 +262,7 @@ config IP_NF_TARGET_REDIRECT
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_TARGET_REDIRECT.
 
-endif
-
-config NF_NAT_SNMP_BASIC
-       tristate "Basic SNMP-ALG support"
-       depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
-       depends on NETFILTER_ADVANCED
-       default NF_NAT && NF_CONNTRACK_SNMP
-       ---help---
-
-         This module implements an Application Layer Gateway (ALG) for
-         SNMP payloads.  In conjunction with NAT, it allows a network
-         management system to access multiple private networks with
-         conflicting addresses.  It works by modifying IP addresses
-         inside SNMP payloads to match IP-layer NAT mapping.
-
-         This is the "basic" form of SNMP-ALG, as described in RFC 2962
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
-# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
-# From kconfig-language.txt:
-#
-#           <expr> '&&' <expr>                   (6)
-#
-# (6) Returns the result of min(/expr/, /expr/).
-
-config NF_NAT_PROTO_GRE
-       tristate
-       depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
-
-config NF_NAT_PPTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT_IPV4
-       default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
-       select NF_NAT_PROTO_GRE
-
-config NF_NAT_H323
-       tristate
-       depends on NF_CONNTRACK && NF_NAT_IPV4
-       default NF_NAT_IPV4 && NF_CONNTRACK_H323
+endif # IP_NF_NAT
 
 # mangle + specific targets
 config IP_NF_MANGLE
index 33001621465b8d131e59cfce8920f87f3e46c7fc..edf4af32e9f28fcdb3a391abbb0d651b4a16af38 100644 (file)
@@ -43,7 +43,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 # the three instances of ip_tables
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
+obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
index eaa4b000c7b443898be7c5ce36f4da4eb20158a6..cbadb942c3328d3585add8e791fcd2e3d2cb1d67 100644 (file)
@@ -746,7 +746,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
        }
 
        n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
-       if (n) {
+       if (!IS_ERR(n)) {
                if (!(n->nud_state & NUD_VALID)) {
                        neigh_event_send(n, NULL);
                } else {
@@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
                return rt;
 
        if (flp4->flowi4_proto)
-               rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-                                                  flowi4_to_flowi(flp4),
-                                                  sk, 0);
+               rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+                                                       flowi4_to_flowi(flp4),
+                                                       sk, 0);
 
        return rt;
 }
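
The __ip_do_redirect() fix matters because ipv4_neigh_lookup() reports failure through an error-encoded pointer, not NULL, so the result has to be tested with IS_ERR(). A self-contained sketch of the pointer-encoded-error convention (a simplified re-implementation for illustration, not the kernel's headers):

    #include <errno.h>
    #include <stdio.h>

    /* Simplified ERR_PTR convention: the top 4095 addresses encode -errno. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *lookup_neigh(int fail)
    {
        static int entry = 42;
        return fail ? ERR_PTR(-ENOBUFS) : &entry;
    }

    int main(void)
    {
        void *n = lookup_neigh(1);

        /* "if (n)" would wrongly treat the error cookie as a valid entry. */
        if (!IS_ERR(n))
            printf("got entry %d\n", *(int *)n);
        else
            printf("lookup failed: %ld\n", PTR_ERR(n));
        return 0;
    }
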
index 0b239fc1816ed828862f19cdde0f08720a0b43ca..3e118dfddd02a0c16051b49ffba0953daed54225 100644 (file)
@@ -1690,14 +1690,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
        addrconf_mod_dad_work(ifp, 0);
 }
 
-/* Join to solicited addr multicast group. */
-
+/* Join to solicited addr multicast group.
+ * caller must hold RTNL */
 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
-       ASSERT_RTNL();
-
        if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1705,12 +1703,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
        ipv6_dev_mc_inc(dev, &maddr);
 }
 
+/* caller must hold RTNL */
 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
-       ASSERT_RTNL();
-
        if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1718,12 +1715,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
        __ipv6_dev_mc_dec(idev, &maddr);
 }
 
+/* caller must hold RTNL */
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
 
-       ASSERT_RTNL();
-
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1732,12 +1728,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
        ipv6_dev_ac_inc(ifp->idev->dev, &addr);
 }
 
+/* caller must hold RTNL */
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
 
-       ASSERT_RTNL();
-
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -3099,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        write_unlock_bh(&idev->lock);
 
-       /* Step 5: Discard multicast list */
-       if (how)
+       /* Step 5: Discard anycast and multicast list */
+       if (how) {
+               ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
-       else
+       } else {
                ipv6_mc_down(idev);
+       }
 
        idev->tstamp = jiffies;
 
@@ -4773,24 +4770,21 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                addrconf_leave_solict(ifp->idev, &ifp->addr);
                if (!ipv6_addr_any(&ifp->peer_addr)) {
                        struct rt6_info *rt;
-                       struct net_device *dev = ifp->idev->dev;
-
-                       rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
-                                       dev->ifindex, 1);
-                       if (rt) {
-                               dst_hold(&rt->dst);
-                               if (ip6_del_rt(rt))
-                                       dst_free(&rt->dst);
-                       }
+
+                       rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
+                                                      ifp->idev->dev, 0, 0);
+                       if (rt && ip6_del_rt(rt))
+                               dst_free(&rt->dst);
                }
                dst_hold(&ifp->rt->dst);
 
                if (ip6_del_rt(ifp->rt))
                        dst_free(&ifp->rt->dst);
+
+               rt_genid_bump_ipv6(net);
                break;
        }
        atomic_inc(&net->ipv6.dev_addr_genid);
-       rt_genid_bump_ipv6(net);
 }
 
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
index e6960457f62582c4ca41c674cc531d64ca76b038..98cc4cd570e23b96dc479956afe7bb6660a8a6c6 100644 (file)
@@ -8,6 +8,13 @@
 #include <net/addrconf.h>
 #include <net/ip.h>
 
+/* if the ipv6 module registers, this function is used by xfrm to force all
+ * sockets to relookup their nodes - this is fairly expensive, be
+ * careful
+ */
+void (*__fib6_flush_trees)(struct net *);
+EXPORT_SYMBOL(__fib6_flush_trees);
+
 #define IPV6_ADDR_SCOPE_TYPE(scope)    ((scope) << 16)
 
 static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
index 2101832446896245e54b608bb24f27d83e42a005..9a386842fd6278468ed6cc1b435d00f31b2ebe43 100644 (file)
@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        pac->acl_next = NULL;
        pac->acl_addr = *addr;
 
+       rtnl_lock();
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
 error:
        rcu_read_unlock();
+       rtnl_unlock();
        if (pac)
                sock_kfree_s(sk, pac, sizeof(*pac));
        return err;
@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        spin_unlock_bh(&ipv6_sk_ac_lock);
 
+       rtnl_lock();
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
        if (dev)
                ipv6_dev_ac_dec(dev, &pac->acl_addr);
        rcu_read_unlock();
+       rtnl_unlock();
 
        sock_kfree_s(sk, pac, sizeof(*pac));
        return 0;
@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
        spin_unlock_bh(&ipv6_sk_ac_lock);
 
        prev_index = 0;
+       rtnl_lock();
        rcu_read_lock();
        while (pac) {
                struct ipv6_ac_socklist *next = pac->acl_next;
@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
                pac = next;
        }
        rcu_read_unlock();
+       rtnl_unlock();
 }
 
 static void aca_put(struct ifacaddr6 *ac)
@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
        struct rt6_info *rt;
        int err;
 
+       ASSERT_RTNL();
+
        idev = in6_dev_get(dev);
 
        if (idev == NULL)
@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct ifacaddr6 *aca, *prev_aca;
 
+       ASSERT_RTNL();
+
        write_lock_bh(&idev->lock);
        prev_aca = NULL;
        for (aca = idev->ac_list; aca; aca = aca->aca_next) {
@@ -341,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
        return __ipv6_dev_ac_dec(idev, addr);
 }
 
+void ipv6_ac_destroy_dev(struct inet6_dev *idev)
+{
+       struct ifacaddr6 *aca;
+
+       write_lock_bh(&idev->lock);
+       while ((aca = idev->ac_list) != NULL) {
+               idev->ac_list = aca->aca_next;
+               write_unlock_bh(&idev->lock);
+
+               addrconf_leave_solict(idev, &aca->aca_addr);
+
+               dst_hold(&aca->aca_rt->dst);
+               ip6_del_rt(aca->aca_rt);
+
+               aca_put(aca);
+
+               write_lock_bh(&idev->lock);
+       }
+       write_unlock_bh(&idev->lock);
+}
+
 /*
  *     check if the interface has this anycast address
  *     called with rcu_read_lock()
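
ipv6_ac_destroy_dev() pops one anycast entry at a time: it holds the idev lock only long enough to unlink the list head, releases it for the work that cannot run under the lock (route deletion, the final put), and re-takes it for the next entry. A pthread-based sketch of that unlink-under-lock, work-outside-lock loop, with stand-in teardown work (link with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int          id;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void slow_teardown(struct node *n)
    {
        /* Stand-in for work that must not run while the lock is held. */
        printf("tearing down node %d\n", n->id);
        free(n);
    }

    static void destroy_all(void)
    {
        pthread_mutex_lock(&list_lock);
        while (head) {
            struct node *n = head;

            head = n->next;                    /* unlink under the lock  */
            pthread_mutex_unlock(&list_lock);

            slow_teardown(n);                  /* heavy work outside it  */

            pthread_mutex_lock(&list_lock);    /* re-take for next entry */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }
        destroy_all();
        return 0;
    }
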
index 76b7f5ee8f4c8a9d8e2b0369135e83d067a31afa..97b9fa8de37783a0da90671208e42a887a20332c 100644 (file)
@@ -1605,6 +1605,24 @@ static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
        fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
 }
 
+static int fib6_update_sernum(struct rt6_info *rt, void *arg)
+{
+       __u32 sernum = *(__u32 *)arg;
+
+       if (rt->rt6i_node &&
+           rt->rt6i_node->fn_sernum != sernum)
+               rt->rt6i_node->fn_sernum = sernum;
+
+       return 0;
+}
+
+static void fib6_flush_trees(struct net *net)
+{
+       __u32 new_sernum = fib6_new_sernum();
+
+       fib6_clean_all(net, fib6_update_sernum, &new_sernum);
+}
+
 /*
  *     Garbage collection
  */
@@ -1788,6 +1806,8 @@ int __init fib6_init(void)
                              NULL);
        if (ret)
                goto out_unregister_subsys;
+
+       __fib6_flush_trees = fib6_flush_trees;
 out:
        return ret;
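
fib6_init() fills in the exported __fib6_flush_trees pointer so core code can reach an IPv6-only operation without a hard dependency, and the flush itself just walks the trees bumping the serial number so cached lookups revalidate. A sketch of the optional-hook-plus-generation-counter idea with made-up names (the NULL-checking caller is not part of this diff and is assumed):

    #include <stdio.h>

    /* Core-visible hook; stays NULL until the optional module registers. */
    static void (*flush_trees_hook)(void);

    /* Generation counter: bumping it invalidates everything cached against it. */
    static unsigned int tree_sernum = 1;

    static void fib_flush_trees(void)
    {
        tree_sernum++;
        printf("flushed, sernum now %u\n", tree_sernum);
    }

    /* Optional module's init registers the hook. */
    static void fib_init(void)
    {
        flush_trees_hook = fib_flush_trees;
    }

    /* Core code calls through the pointer only if someone registered. */
    static void core_config_changed(void)
    {
        if (flush_trees_hook)
            flush_trees_hook();
    }

    int main(void)
    {
        core_config_changed();   /* no-op: module not registered   */
        fib_init();              /* like fib6_init() setting it up */
        core_config_changed();   /* now triggers the flush         */
        return 0;
    }
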
 
index 5f19dfbc4c6a4039ae2a22eb065bed6d46565d83..f304471477dce45bcb8fc71f842ae8c27032f609 100644 (file)
@@ -314,6 +314,8 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
        t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+       if (t && create)
+               return NULL;
        if (t || !create)
                return t;
 
@@ -1724,4 +1726,5 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
 MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_RTNL_LINK("ip6gretap");
 MODULE_ALIAS_NETDEV("ip6gre0");
index 315a55d66079cb7129dbfe183e31bcceffd63f99..0a3448b2888fbdc62181a531ad665ce8fce3a65f 100644 (file)
@@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
 
@@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
index f9de5a69507252a12cbf1efffbf416721d9c871a..69a84b464009cc3192d42acc8a2c4d1e8419ca61 100644 (file)
@@ -364,8 +364,12 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
-                   ipv6_addr_equal(remote, &t->parms.raddr))
+                   ipv6_addr_equal(remote, &t->parms.raddr)) {
+                       if (create)
+                               return NULL;
+
                        return t;
+               }
        }
        if (!create)
                return NULL;
index 7f52fd9fa7b0d694dfa35f6246133d379f8ea710..5833a2244467325caff4f75f5740adc809db235b 100644 (file)
@@ -253,8 +253,12 @@ static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
-                   ipv6_addr_equal(remote, &t->parms.raddr))
+                   ipv6_addr_equal(remote, &t->parms.raddr)) {
+                       if (create)
+                               return NULL;
+
                        return t;
+               }
        }
        if (!create)
                return NULL;
index 617f0958e164e7893ca70e80e09d1ed933c95b69..a23b655a7627a69046d956139946c00dda8825f4 100644 (file)
@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        mc_lst->next = NULL;
        mc_lst->addr = *addr;
 
+       rtnl_lock();
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        if (dev == NULL) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return -ENODEV;
        }
@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        if (err) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return err;
        }
@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        spin_unlock(&ipv6_sk_mc_lock);
 
        rcu_read_unlock();
+       rtnl_unlock();
 
        return 0;
 }
@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
 
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        for (lnk = &np->ipv6_mc_list;
             (mc_lst = rcu_dereference_protected(*lnk,
@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                        } else
                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                        rcu_read_unlock();
+                       rtnl_unlock();
+
                        atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                        kfree_rcu(mc_lst, rcu);
                        return 0;
                }
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
 
        return -EADDRNOTAVAIL;
 }
@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
        if (!rcu_access_pointer(np->ipv6_mc_list))
                return;
 
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
                                lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
                spin_lock(&ipv6_sk_mc_lock);
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
 }
 
 int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
 
+       ASSERT_RTNL();
+
        /* we need to take a reference on idev */
        idev = in6_dev_get(dev);
 
@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct ifmcaddr6 *ma, **map;
 
+       ASSERT_RTNL();
+
        write_lock_bh(&idev->lock);
        for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
                if (ipv6_addr_equal(&ma->mca_addr, addr)) {
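
The multicast hunks above wrap the socket-level join, drop and close paths in rtnl_lock()/rtnl_unlock() and add ASSERT_RTNL() to the device-level helpers, so the coarse outer lock is always taken before the finer per-socket lock and the helpers can verify it. A small pthread sketch of that ordering; cfg_lock, assert_cfg_lock() and mc_lock are made-up stand-ins for RTNL, ASSERT_RTNL() and the per-socket spinlock, not kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;  /* coarse, outer */
static pthread_mutex_t mc_lock  = PTHREAD_MUTEX_INITIALIZER;  /* fine, inner   */
static bool cfg_lock_held;

static void take_cfg_lock(void) { pthread_mutex_lock(&cfg_lock); cfg_lock_held = true; }
static void drop_cfg_lock(void) { cfg_lock_held = false; pthread_mutex_unlock(&cfg_lock); }

/* Device-level helpers must only run with the outer lock held. */
static void assert_cfg_lock(void) { assert(cfg_lock_held); }

static int dev_mc_inc(int ifindex)
{
        assert_cfg_lock();
        printf("join group on ifindex %d\n", ifindex);
        return 0;
}

static int sock_mc_join(int ifindex)
{
        int err;

        take_cfg_lock();                 /* outer lock first ...         */
        pthread_mutex_lock(&mc_lock);    /* ... then the per-socket lock */
        err = dev_mc_inc(ifindex);
        pthread_mutex_unlock(&mc_lock);
        drop_cfg_lock();
        return err;
}

int main(void)
{
        return sock_mc_join(2);
}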
index ac93df16f5af678f8724346343517ef241a415f3..2812816aabdc0a2918b183094ac4a119b4c0f1fa 100644 (file)
@@ -57,9 +57,19 @@ config NFT_REJECT_IPV6
 
 config NF_LOG_IPV6
        tristate "IPv6 packet logging"
-       depends on NETFILTER_ADVANCED
+       default m if NETFILTER_ADVANCED=n
        select NF_LOG_COMMON
 
+config NF_NAT_IPV6
+       tristate "IPv6 NAT"
+       depends on NF_CONNTRACK_IPV6
+       depends on NETFILTER_ADVANCED
+       select NF_NAT
+       help
+         The IPv6 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. This can be
+         controlled by iptables or nft.
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
@@ -232,19 +242,21 @@ config IP6_NF_SECURITY
 
          If unsure, say N.
 
-config NF_NAT_IPV6
-       tristate "IPv6 NAT"
+config IP6_NF_NAT
+       tristate "ip6tables NAT support"
        depends on NF_CONNTRACK_IPV6
        depends on NETFILTER_ADVANCED
        select NF_NAT
+       select NF_NAT_IPV6
+       select NETFILTER_XT_NAT
        help
-         The IPv6 NAT option allows masquerading, port forwarding and other
-         forms of full Network Address Port Translation. It is controlled by
-         the `nat' table in ip6tables, see the man page for ip6tables(8).
+         This enables the `nat' table in ip6tables. This allows masquerading,
+         port forwarding and other forms of full Network Address Port
+         Translation.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-if NF_NAT_IPV6
+if IP6_NF_NAT
 
 config IP6_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
@@ -265,7 +277,7 @@ config IP6_NF_TARGET_NPT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-endif # NF_NAT_IPV6
+endif # IP6_NF_NAT
 
 endif # IP6_NF_IPTABLES
 
index c0b263104ed23170e15b3278408fe95f36c77da9..c3d3286db4bb804546ee8b0488ecdad1441bb06b 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
-obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
+obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
index f23fbd28a501ed5c3438abb7f1cbbec85233688b..bafde82324c57369abfd6f5b37decafadcb08d9c 100644 (file)
@@ -314,7 +314,6 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
 
                memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
                rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
-               rt->rt6i_genid = rt_genid_ipv6(net);
                INIT_LIST_HEAD(&rt->rt6i_siblings);
        }
        return rt;
@@ -1098,9 +1097,6 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */
-       if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
-               return NULL;
-
        if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
                return NULL;
 
index 13752d96275e8b9142539a201ea1ac6f45f883ba..b704a9356208f5cc3064085b9d82eab52542dd5a 100644 (file)
@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        /* If PMTU discovery was enabled, use the MTU that was discovered */
        dst = sk_dst_get(tunnel->sock);
        if (dst != NULL) {
-               u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+               u32 pmtu = dst_mtu(dst);
+
                if (pmtu != 0)
                        session->mtu = session->mru = pmtu -
                                PPPOL2TP_HEADER_OVERHEAD;
index 0375009ddc0db39fd5a365db8cb5e4122219d864..399ad82c997f99c33833c03c67e6acddf7019b6f 100644 (file)
@@ -541,6 +541,8 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                        continue;
                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       continue;
 
                if (!compat)
                        compat = &sdata->vif.bss_conf.chandef;
index 3db96648b45a02c0e0235b724210a15ba32747b7..86173c0de40e91d658e883514232c720a4c51e0e 100644 (file)
@@ -167,7 +167,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
        p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
                        sta->ampdu_mlme.dialog_token_allocator + 1);
        p += scnprintf(p, sizeof(buf) + buf - p,
-                      "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
+                      "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
 
        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
                tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
index 01eede7406a55610e771c7cd906cbedcfff1fa6a..f75e5f132c5ad4a816fc933defedda38f0695e66 100644 (file)
@@ -1175,8 +1175,8 @@ static void ieee80211_iface_work(struct work_struct *work)
                        if (sta) {
                                u16 last_seq;
 
-                               last_seq = le16_to_cpu(
-                                       sta->last_seq_ctrl[rx_agg->tid]);
+                               last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(
+                                       sta->last_seq_ctrl[rx_agg->tid]));
 
                                __ieee80211_start_rx_ba_session(sta,
                                                0, 0,
index 63b874101b2763d5997dc561073e96807596c11a..c47194d2714933bd8e1dc3658ffd165cd50a5a4a 100644 (file)
@@ -959,7 +959,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = CNF_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   (sta->llid != llid || sta->plid != plid))
+                   sta->llid != llid ||
+                   (sta->plid && sta->plid != plid))
                        event = CNF_IGNR;
                else
                        event = CNF_ACPT;
@@ -1080,6 +1081,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                goto unlock_rcu;
        }
 
+       /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+       if (!sta->plid && event == CNF_ACPT)
+               sta->plid = plid;
+
        changed |= mesh_plink_fsm(sdata, sta, event);
 
 unlock_rcu:
index 31a8afaf73323bc09fb99f8e960b17c3fa2e31fd..b82a12a9f0f110779756027944dce6293d1387fb 100644 (file)
@@ -4376,8 +4376,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 
        if (bss->wmm_used && bss->uapsd_supported &&
-           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
-           sdata->wmm_acm != 0xff) {
+           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
                assoc_data->uapsd = true;
                ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
        } else {
index c6ee2139fbc579bab35e8cc5bfb6f3b4994930d2..a1e433b88c66ab7413eb8813b7010b56b0a4c450 100644 (file)
@@ -1094,8 +1094,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        unsigned long flags;
        struct ps_data *ps;
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+                                    u.ap);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP)
                ps = &sdata->bss->ps;
        else if (ieee80211_vif_is_mesh(&sdata->vif))
                ps = &sdata->u.mesh.ps;
@@ -1819,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
        if (sdata->vif.bss_conf.use_short_slot)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
-       sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+       sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
        sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
 
        sinfo->sta_flags.set = 0;
index 3c3069fd69718277fc719e82d7bf2c51bb9747db..547838822d5e9c0d7064d72e2dc3be1ff2c14e14 100644 (file)
@@ -462,7 +462,10 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
                        skb->pkt_type = PACKET_OTHERHOST;
                break;
        default:
-               break;
+               spin_unlock_bh(&sdata->mib_lock);
+               pr_debug("invalid dest mode\n");
+               kfree_skb(skb);
+               return NET_RX_DROP;
        }
 
        spin_unlock_bh(&sdata->mib_lock);
@@ -573,6 +576,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
        ret = mac802154_parse_frame_start(skb, &hdr);
        if (ret) {
                pr_debug("got invalid frame\n");
+               kfree_skb(skb);
                return;
        }
 
index ad751fe2e82b8ca017bc83f07d3dd7e9f04229f5..6d77cce481d59b798babc9deba9234dff31c7dce 100644 (file)
@@ -499,7 +499,7 @@ config NFT_LIMIT
 config NFT_NAT
        depends on NF_TABLES
        depends on NF_CONNTRACK
-       depends on NF_NAT
+       select NF_NAT
        tristate "Netfilter nf_tables nat module"
        help
          This option adds the "nat" expression that you can use to perform
@@ -747,7 +747,9 @@ config NETFILTER_XT_TARGET_LED
 
 config NETFILTER_XT_TARGET_LOG
        tristate "LOG target support"
-       depends on NF_LOG_IPV4 && NF_LOG_IPV6
+       select NF_LOG_COMMON
+       select NF_LOG_IPV4
+       select NF_LOG_IPV6 if IPV6
        default m if NETFILTER_ADVANCED=n
        help
          This option adds a `LOG' target, which allows you to create rules in
@@ -764,6 +766,14 @@ config NETFILTER_XT_TARGET_MARK
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
 
+config NETFILTER_XT_NAT
+       tristate '"SNAT and DNAT" targets support'
+       depends on NF_NAT
+       ---help---
+       This option enables the SNAT and DNAT targets.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_NETMAP
        tristate '"NETMAP" target support'
        depends on NF_NAT
@@ -837,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
        tristate '"TPROXY" target transparent proxying support'
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
+       depends on (IPV6 || IPV6=n)
        depends on IP_NF_MANGLE
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
index 8308624a406ac0d891338e73c44abe6a5fa4c8b0..fad5fdba34e5873c0535037bf709c343ebfd55e3 100644 (file)
@@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
-obj-$(CONFIG_NF_NAT) += xt_nat.o
+obj-$(CONFIG_NETFILTER_XT_NAT) += xt_nat.o
 
 # targets
 obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
index a93c97f106d4a5022cd0e1196ee70ecdfd8c2873..024a2e25c8a49448afbeed4f780912b24a6318a3 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
 #endif
@@ -72,7 +72,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
        }
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        return 0;
@@ -84,7 +84,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        synchronize_net();
index e6836755c45d4168bb6ccc9ab6f12708e9e17f3d..5c34e8d42e0190a14ec31c1238bde5929fdf0539 100644 (file)
@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        {
                .hook           = ip_vs_local_reply6,
                .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
+               .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST + 1,
        },
index 6f70bdd3a90ad85c72cb100de997d2f5b5bc40a1..56896a412bcec7ae01726734115d19bc04079274 100644 (file)
@@ -38,6 +38,7 @@
 #include <net/route.h>                  /* for ip_route_output */
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
 #include <net/addrconf.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
@@ -862,11 +863,15 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                old_iph = ip_hdr(skb);
        }
 
-       skb->transport_header = skb->network_header;
-
        /* fix old IP header checksum */
        ip_send_check(old_iph);
 
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto tx_error;
+
+       skb->transport_header = skb->network_header;
+
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -900,7 +905,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        return NF_STOLEN;
 
   tx_error:
-       kfree_skb(skb);
+       if (!IS_ERR(skb))
+               kfree_skb(skb);
        rcu_read_unlock();
        LeaveFunction(10);
        return NF_STOLEN;
@@ -953,6 +959,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                old_iph = ipv6_hdr(skb);
        }
 
+       /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */
+       skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */
+       if (IS_ERR(skb))
+               goto tx_error;
+
        skb->transport_header = skb->network_header;
 
        skb_push(skb, sizeof(struct ipv6hdr));
@@ -988,7 +999,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        return NF_STOLEN;
 
 tx_error:
-       kfree_skb(skb);
+       if (!IS_ERR(skb))
+               kfree_skb(skb);
        rcu_read_unlock();
        LeaveFunction(10);
        return NF_STOLEN;
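
In both ip_vs_tunnel_xmit() hunks above, the skb is first passed through iptunnel_handle_offloads(), which may consume it and hand back an error pointer, so the shared tx_error label now frees the skb only when it is not an error pointer. A compact userspace sketch of that convention; the ERR_PTR()/IS_ERR()/PTR_ERR() macros are re-implemented here only for illustration (the kernel provides them in <linux/err.h>), and the buffer type and helper are invented:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct buf { char data[64]; };

/* On failure this helper frees the buffer itself and hands back an error
 * pointer, so the caller must not free it again. */
static struct buf *prepare_offloads(struct buf *b, int fail)
{
        if (fail) {
                free(b);
                return ERR_PTR(-ENOMEM);
        }
        return b;
}

static int xmit(int fail)
{
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
                return -ENOMEM;

        b = prepare_offloads(b, fail);
        if (IS_ERR(b))
                goto tx_error;

        printf("transmitted\n");
        free(b);
        return 0;

tx_error:
        /* Only free a real buffer, never an error pointer. */
        if (!IS_ERR(b))
                free(b);
        return (int)PTR_ERR(b);
}

int main(void)
{
        printf("ok path:   %d\n", xmit(0));
        printf("fail path: %d\n", xmit(1));
        return 0;
}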
index c138b8fbe280af6886693421a7fe8d9a288156cf..f37f0716a9fc04e58f69bc826e827ce99546cec2 100644 (file)
@@ -222,6 +222,51 @@ replay:
        }
 }
 
+struct nfnl_err {
+       struct list_head        head;
+       struct nlmsghdr         *nlh;
+       int                     err;
+};
+
+static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
+{
+       struct nfnl_err *nfnl_err;
+
+       nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
+       if (nfnl_err == NULL)
+               return -ENOMEM;
+
+       nfnl_err->nlh = nlh;
+       nfnl_err->err = err;
+       list_add_tail(&nfnl_err->head, list);
+
+       return 0;
+}
+
+static void nfnl_err_del(struct nfnl_err *nfnl_err)
+{
+       list_del(&nfnl_err->head);
+       kfree(nfnl_err);
+}
+
+static void nfnl_err_reset(struct list_head *err_list)
+{
+       struct nfnl_err *nfnl_err, *next;
+
+       list_for_each_entry_safe(nfnl_err, next, err_list, head)
+               nfnl_err_del(nfnl_err);
+}
+
+static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
+{
+       struct nfnl_err *nfnl_err, *next;
+
+       list_for_each_entry_safe(nfnl_err, next, err_list, head) {
+               netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
+               nfnl_err_del(nfnl_err);
+       }
+}
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                u_int16_t subsys_id)
 {
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
        bool success = true, done = false;
+       static LIST_HEAD(err_list);
        int err;
 
        if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ replay:
                type = nlh->nlmsg_type;
                if (type == NFNL_MSG_BATCH_BEGIN) {
                        /* Malformed: Batch begin twice */
+                       nfnl_err_reset(&err_list);
                        success = false;
                        goto done;
                } else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ replay:
                         * original skb.
                         */
                        if (err == -EAGAIN) {
+                               nfnl_err_reset(&err_list);
                                ss->abort(skb);
                                nfnl_unlock(subsys_id);
                                kfree_skb(nskb);
@@ -341,11 +389,24 @@ replay:
                }
 ack:
                if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+                       /* Errors are delivered once the full batch has been
+                        * processed, this avoids that the same error is
+                        * reported several times when replaying the batch.
+                        */
+                       if (nfnl_err_add(&err_list, nlh, err) < 0) {
+                               /* We failed to enqueue an error, reset the
+                                * list of errors and send OOM to userspace
+                                * pointing to the batch header.
+                                */
+                               nfnl_err_reset(&err_list);
+                               netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+                               success = false;
+                               goto done;
+                       }
                        /* We don't stop processing the batch on errors, thus,
                         * userspace gets all the errors that the batch
                         * triggers.
                         */
-                       netlink_ack(skb, nlh, err);
                        if (err)
                                success = false;
                }
@@ -361,6 +422,7 @@ done:
        else
                ss->abort(skb);
 
+       nfnl_err_deliver(&err_list, oskb);
        nfnl_unlock(subsys_id);
        kfree_skb(nskb);
 }
index 28fb8f38e6ba7df25fc8120dd82080d817f638be..8892b7b6184aaf9beafac3da8a665f5f4b696247 100644 (file)
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
        const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl;
+       const struct bucket_table *tbl = priv->tbl;
        struct nft_hash_elem *he, *next;
        unsigned int i;
 
-       tbl = rht_dereference(priv->tbl, priv);
-       for (i = 0; i < tbl->size; i++)
-               rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
+       for (i = 0; i < tbl->size; i++) {
+               for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
+                    he != NULL; he = next) {
+                       next = rht_entry(he->node.next, struct nft_hash_elem, node);
                        nft_hash_elem_destroy(set, he);
-
+               }
+       }
        rhashtable_destroy(priv);
 }
 
index e1836ff881994dc08e957d414c6e14a0fbbff919..46214f245665a0f70138cbd025bb9d1917a7e32f 100644 (file)
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        struct nft_rbtree_elem *rbe;
        struct rb_node *node;
 
-       spin_lock_bh(&nft_rbtree_lock);
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
                nft_rbtree_elem_destroy(set, rbe);
        }
-       spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
index f4e833005320fa7da4c0deb41bba272e8c54bd8e..7198d660b4dea1e9e79c6f9a13f4e6669bca569d 100644 (file)
@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par)
        if (info->invert & ~1)
                return -EINVAL;
 
-       return info->id ? 0 : -EINVAL;
+       return 0;
 }
 
 static bool
index 7228ec3faf19cdc02a685caa04ccbf04ddf8005a..64dc864a417f2b5910d71ab8641cd31d5367eba1 100644 (file)
@@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Check if need to build a reply message.
  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
-                           const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+                           unsigned int group)
 {
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-               netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+              genl_has_listeners(family, genl_info_net(info)->genl_sock,
+                                 group);
 }
 
 static void ovs_notify(struct genl_family *family,
@@ -265,8 +266,11 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
-               ovs_dp_upcall(dp, skb, &upcall);
-               consume_skb(skb);
+               error = ovs_dp_upcall(dp, skb, &upcall);
+               if (unlikely(error))
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }
@@ -404,7 +408,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 {
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
-       struct sk_buff *user_skb; /* to be queued to userspace */
+       struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
@@ -494,9 +498,11 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
+       user_skb = NULL;
 out:
        if (err)
                skb_tx_error(skb);
+       kfree_skb(user_skb);
        kfree_skb(nskb);
        return err;
 }
@@ -758,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 {
        struct sk_buff *skb;
 
-       if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+       if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;
 
        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
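
In the queue_userspace_packet() hunk above, user_skb starts out NULL, is forgotten (set back to NULL) right after genlmsg_unicast() takes it over, and is freed unconditionally on the common exit path, so every early error still releases it exactly once. A short sketch of that single-exit ownership pattern; the buffer, helper and error codes are illustrative, not the OVS API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the delivery call: consumes the buffer whether or not
 * delivery succeeds, so the caller must forget its pointer afterwards. */
static int send_to_userspace(char *buf, int fail)
{
        free(buf);
        return fail ? -EAGAIN : 0;
}

static int queue_packet(int fail_alloc, int fail_send)
{
        char *user_buf = NULL;          /* to be queued to userspace */
        int err = 0;

        if (fail_alloc) {
                err = -ENOMEM;
                goto out;
        }
        user_buf = malloc(128);
        if (!user_buf) {
                err = -ENOMEM;
                goto out;
        }
        memset(user_buf, 0, 128);

        err = send_to_userspace(user_buf, fail_send);
        user_buf = NULL;                /* ownership moved, even on error */
out:
        free(user_buf);                 /* frees only what we still own; free(NULL) is a no-op */
        return err;
}

int main(void)
{
        printf("%d %d %d\n",
               queue_packet(1, 0), queue_packet(0, 1), queue_packet(0, 0));
        return 0;
}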
index 14c98e48f261ee49656eabb198d62648890b63b3..0f62326c0f5ea7581f996441da183da1e7899bfb 100644 (file)
@@ -54,7 +54,7 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
        if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
                clk_disable(rfkill->clk);
 
-       rfkill->clk_enabled = blocked;
+       rfkill->clk_enabled = !blocked;
 
        return 0;
 }
@@ -158,10 +158,12 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
        { "BCM4752", RFKILL_TYPE_GPS },
        { "LNV4752", RFKILL_TYPE_GPS },
        { },
 };
+MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
 #endif
 
 static struct platform_driver rfkill_gpio_driver = {
index b45d080e64a736294167b192a660eb79718c808c..1b24191167f1f5de119271f05a3f7f6dcfb5e376 100644 (file)
@@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key,
                if (copy_to_user(xdr, (s), _l) != 0)                    \
                        goto fault;                                     \
                if (_l & 3 &&                                           \
-                   copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
+                   copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
                        goto fault;                                     \
                xdr += (_l + 3) >> 2;                                   \
        } while(0)
index 3a633debb6df00f4bf13f51bacbdad0829ad321b..ad57f4444b9c65cdc7abe6eb588868c4e2b65cd2 100644 (file)
@@ -526,9 +526,11 @@ pop_stack:
                match_idx = stack[--stackp];
                cur_match = tcf_em_get_match(tree, match_idx);
 
-               if (tcf_em_early_end(cur_match, res))
+               if (tcf_em_early_end(cur_match, res)) {
+                       if (tcf_em_is_inverted(cur_match))
+                               res = !res;
                        goto pop_stack;
-               else {
+               } else {
                        match_idx++;
                        goto proceed;
                }
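
The classifier hunk above fixes early termination inside an inverted match container: when the walk pops back out of a sub-tree whose result is negated, the partial result has to be flipped before evaluation continues. A tiny recursive sketch of match-tree evaluation with per-node inversion; the node layout and names are invented for the example (the kernel version walks an explicit stack instead of recursing):

#include <stdbool.h>
#include <stdio.h>

struct match {
        bool invert;                  /* negate this node's result               */
        bool leaf_value;              /* result for a leaf node                  */
        const struct match *child[2]; /* AND of up to two children (NULL = none) */
};

static bool eval(const struct match *m)
{
        bool res;

        if (!m->child[0] && !m->child[1]) {
                res = m->leaf_value;
        } else {
                /* AND semantics: stop early on the first false child ... */
                res = true;
                for (int i = 0; i < 2 && m->child[i]; i++) {
                        if (!eval(m->child[i])) {
                                res = false;
                                break;       /* early end of this container */
                        }
                }
        }
        /* ... but an inverted container must still flip the partial result. */
        return m->invert ? !res : res;
}

int main(void)
{
        struct match no  = { .leaf_value = false };
        struct match yes = { .leaf_value = true };
        /* NOT(no AND yes): the inner AND ends early on 'no', and the
         * inversion turns that false into true. */
        struct match inv = { .invert = true, .child = { &no, &yes } };

        printf("%d\n", eval(&inv));       /* prints 1 */
        return 0;
}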
index ed30e436128bff7c556e0c334e4cb5ae8d088e5e..fb666d1e4de3197cb81b0b2287b2dcef2df294a9 100644 (file)
@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
+/* private part of skb->cb[] that a qdisc is allowed to use
+ * is limited to QDISC_CB_PRIV_LEN bytes.
+ * As a flow key might be too large, we store a part of it only.
+ */
+#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
+
 struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
-       struct flow_keys        keys;
+       u8                      keys[QDISC_CB_PRIV_LEN - 3];
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
 {
+       struct flow_keys temp;
+
        if (skb1->protocol != skb2->protocol)
                return false;
 
        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
-               skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+               skb_flow_dissect(skb1, &temp);
+               memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
        }
 
        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
-               skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+               skb_flow_dissect(skb2, &temp);
+               memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
        }
 
        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
-                      sizeof(struct flow_keys));
+                      CHOKE_K_LEN);
 }
 
 /*
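
The choke_match_flow() hunk above spells out the constraint: a qdisc may only use QDISC_CB_PRIV_LEN bytes of skb->cb[], so the dissected flow key is truncated to CHOKE_K_LEN and both caching and comparison operate on that prefix. A small sketch of caching a truncated key in a fixed control-block area and comparing by prefix; the sizes and structure names below are arbitrary stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_PRIV_LEN 20                      /* pretend per-packet cb[] budget */

struct flow_key {                           /* "full" key, larger than cb[]   */
        uint32_t src, dst;
        uint16_t sport, dport;
        uint8_t  proto;
        uint8_t  pad[19];
};

/* Store only as much of the key as the control block can hold. */
#define KEY_LEN (sizeof(struct flow_key) < CB_PRIV_LEN ? \
                 sizeof(struct flow_key) : (size_t)CB_PRIV_LEN)

struct pkt_cb {
        bool    keys_valid;
        uint8_t keys[CB_PRIV_LEN];
};

static void cache_key(struct pkt_cb *cb, const struct flow_key *full)
{
        if (!cb->keys_valid) {
                memcpy(cb->keys, full, KEY_LEN);    /* truncate to the budget */
                cb->keys_valid = true;
        }
}

static bool same_flow(const struct pkt_cb *a, const struct pkt_cb *b)
{
        return !memcmp(a->keys, b->keys, KEY_LEN);  /* prefix comparison only */
}

int main(void)
{
        struct flow_key k1 = { .src = 1, .dst = 2, .sport = 80, .dport = 1234, .proto = 6 };
        struct flow_key k2 = k1;
        struct pkt_cb a = { 0 }, b = { 0 };

        cache_key(&a, &k1);
        cache_key(&b, &k2);
        printf("same flow: %d\n", same_flow(&a, &b));   /* prints 1 */
        return 0;
}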
index eb71d49e76537f17d48e3eeaa43661fb09788f41..634a2abb5f3a25f15ca99c4737190c1e5e17b27a 100644 (file)
@@ -4243,7 +4243,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
        transport = asoc->peer.primary_path;
 
        status.sstat_assoc_id = sctp_assoc2id(asoc);
-       status.sstat_state = asoc->state;
+       status.sstat_state = sctp_assoc_to_state(asoc);
        status.sstat_rwnd =  asoc->peer.rwnd;
        status.sstat_unackdata = asoc->unack_data;
 
index 95ee7d8682e7447f45f5ddb0ddbde5fff196c84e..4cdbc107606f037c6404f9947beb80174b961e80 100644 (file)
@@ -734,8 +734,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        }
 
        memset(&tss, 0, sizeof(tss));
-       if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
-            skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP) &&
+       if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
            ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
                empty = 0;
        if (shhwtstamps &&
@@ -1997,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
        if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
                return -EFAULT;
 
+       if (kmsg->msg_name == NULL)
+               kmsg->msg_namelen = 0;
+
        if (kmsg->msg_namelen < 0)
                return -EINVAL;
 
@@ -2602,7 +2604,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
  *
  *     This function is called by a protocol handler that wants to
  *     advertise its address family, and have it linked into the
- *     socket interface. The value ops->family coresponds to the
+ *     socket interface. The value ops->family corresponds to the
  *     socket system call protocol family.
  */
 int sock_register(const struct net_proto_family *ops)
index df7b1332a1ec2fa80834b94c233f38a427a0f350..7257164af91bf2f80318bd88d6ed74071df9ce38 100644 (file)
@@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
        struct nlattr *data = ((void **)skb->cb)[2];
        enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        nla_nest_end(skb, data);
        genlmsg_end(skb, hdr);
 
@@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
        void *hdr = ((void **)skb->cb)[1];
        struct nlattr *data = ((void **)skb->cb)[2];
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        if (WARN_ON(!rdev->cur_cmd_info)) {
                kfree_skb(skb);
                return -EINVAL;
index beeed602aeb379f2ddfbd74a61c798cd371636ba..fdde51f4271adf22b26e36c46f76c2b93c4ce56e 100644 (file)
 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN     100
 
+struct xfrm_flo {
+       struct dst_entry *dst_orig;
+       u8 flags;
+};
+
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
                                                __read_mostly;
@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
 }
 
 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
-                                                struct dst_entry *dst,
+                                                struct xfrm_flo *xflo,
                                                 const struct flowi *fl,
                                                 int num_xfrms,
                                                 u16 family)
 {
        int err;
        struct net_device *dev;
+       struct dst_entry *dst;
        struct dst_entry *dst1;
        struct xfrm_dst *xdst;
 
@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
        if (IS_ERR(xdst))
                return xdst;
 
-       if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
+       if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
+           net->xfrm.sysctl_larval_drop ||
+           num_xfrms <= 0)
                return xdst;
 
+       dst = xflo->dst_orig;
        dst1 = &xdst->u.dst;
        dst_hold(dst);
        xdst->route = dst;
@@ -1935,7 +1944,7 @@ static struct flow_cache_object *
 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
 {
-       struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+       struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct xfrm_dst *xdst, *new_xdst;
        int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                        goto make_dummy_bundle;
        }
 
-       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
+                                                 xflo->dst_orig);
        if (IS_ERR(new_xdst)) {
                err = PTR_ERR(new_xdst);
                if (err != -EAGAIN)
@@ -2010,7 +2020,7 @@ make_dummy_bundle:
        /* We found policies, but there's no bundles to instantiate:
         * either because the policy blocks, has no transformations or
         * we could not build template (no xfrm_states).*/
-       xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
+       xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
        }
 
        if (xdst == NULL) {
+               struct xfrm_flo xflo;
+
+               xflo.dst_orig = dst_orig;
+               xflo.flags = flags;
+
                /* To accelerate a bit...  */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !net->xfrm.policy_count[XFRM_POLICY_OUT])
                        goto nopol;
 
                flo = flow_cache_lookup(net, fl, family, dir,
-                                       xfrm_bundle_lookup, dst_orig);
+                                       xfrm_bundle_lookup, &xflo);
                if (flo == NULL)
                        goto nopol;
                if (IS_ERR(flo)) {
@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
 
-                       return make_blackhole(net, family, dst_orig);
+                       return ERR_PTR(-EREMOTE);
                }
 
                err = -EAGAIN;
@@ -2195,6 +2210,23 @@ dropdst:
 }
 EXPORT_SYMBOL(xfrm_lookup);
 
+/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
+ * Otherwise we may send out blackholed packets.
+ */
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl,
+                                   struct sock *sk, int flags)
+{
+       struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
+                                           flags | XFRM_LOOKUP_QUEUE);
+
+       if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+               return make_blackhole(net, dst_orig->ops->family, dst_orig);
+
+       return dst;
+}
+EXPORT_SYMBOL(xfrm_lookup_route);
+
 static inline int
 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 {
@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 
        skb_dst_force(skb);
 
-       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
                res = 0;
                dst = NULL;
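
The xfrm hunks above make plain xfrm_lookup() report -EREMOTE instead of installing a blackhole route itself, and add the xfrm_lookup_route() wrapper that converts exactly that error back into the blackhole fallback for callers that will really call dst_output(). A compact sketch of that wrap-and-translate pattern; the route ids, flag and sentinel below are invented for the example:

#include <errno.h>
#include <stdio.h>

#define ROUTE_BLACKHOLE (-1000)   /* illustrative sentinel for a blackhole route */
#define LOOKUP_QUEUE    0x1       /* illustrative flag: allow queueing/waiting   */

/* Core lookup: returns a route id (>= 0) or a negative errno.  When the
 * required state is missing it reports -EREMOTE and leaves the fallback
 * decision to the caller. */
static int route_lookup(int have_state, int flags)
{
        (void)flags;
        if (!have_state)
                return -EREMOTE;
        return 42;                /* some usable route */
}

/* Wrapper for callers that will really transmit: only here is the
 * -EREMOTE answer converted into a blackhole route, so code paths that
 * never output the packet don't end up holding blackholed packets. */
static int route_lookup_route(int have_state)
{
        int r = route_lookup(have_state, LOOKUP_QUEUE);

        if (r == -EREMOTE)
                return ROUTE_BLACKHOLE;
        return r;
}

int main(void)
{
        printf("resolved: %d\n", route_lookup_route(1));   /* 42 */
        printf("fallback: %d\n", route_lookup_route(0));   /* blackhole */
        return 0;
}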
index b385bcbbf2f58e03d21ec155a304cf3baed7cdd8..4d08b398411fdec9b6707b4cd0db2fd73f1b13fa 100755 (executable)
@@ -2133,7 +2133,10 @@ sub process {
 # Check for improperly formed commit descriptions
                if ($in_commit_log &&
                    $line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
-                   $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) {
+                   !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
+                     ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
+                      defined $rawlines[$linenr] &&
+                      $rawlines[$linenr] =~ /^\s*\("/))) {
                        $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
                        my $init_char = $1;
                        my $orig_commit = lc($2);
index cbfd269a6011154b21cf2d76b2894915577396ec..293828bfd4ac9c248203ee24302381a6fc4b6b8d 100755 (executable)
@@ -197,6 +197,9 @@ exuberant()
        --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/'          \
        --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/'      \
        --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \
+       --regex-c++='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/'       \
+       --regex-c++='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/'    \
+       --regex-c++='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'\
        --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \
        --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \
        --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/'     \
@@ -260,6 +263,9 @@ emacs()
        --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/'      \
        --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/'  \
        --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \
+       --regex='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/'           \
+       --regex='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/'        \
+       --regex='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'    \
        --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'           \
        --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \
        --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\
index 9acc77eae4872a5c3f1726e05737a381576a5151..0032278567ad8c0ecc4228a6c2f065d6cd4c85e8 100644 (file)
@@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
 {
        struct snd_pcm_hw_params *params = arg;
        snd_pcm_format_t format;
-       int channels, width;
+       int channels;
+       ssize_t frame_size;
 
        params->fifo_size = substream->runtime->hw.fifo_size;
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
                format = params_format(params);
                channels = params_channels(params);
-               width = snd_pcm_format_physical_width(format);
-               params->fifo_size /= width * channels;
+               frame_size = snd_pcm_format_size(format, channels);
+               if (frame_size > 0)
+                       params->fifo_size /= (unsigned)frame_size;
        }
        return 0;
 }
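
The snd_pcm_lib_ioctl_fifo_size() hunk above converts the FIFO size from bytes to frames by asking the PCM core for the byte size of one frame for the given format and channel count, and divides only when that size can actually be computed. A minimal arithmetic sketch of the same conversion; the format table and names are made up for the example:

#include <stdio.h>

/* Physical sample width in bits for a couple of made-up formats. */
enum fmt { FMT_S16 = 0, FMT_S24_3LE, FMT_UNKNOWN };

static int format_physical_width(enum fmt f)
{
        switch (f) {
        case FMT_S16:     return 16;
        case FMT_S24_3LE: return 24;   /* packed 3-byte samples */
        default:          return -1;   /* unknown: no frame size can be computed */
        }
}

/* Bytes occupied by one frame, or a negative value on error. */
static long frame_size(enum fmt f, int channels)
{
        int width = format_physical_width(f);

        if (width < 0 || channels <= 0)
                return -1;
        return (long)channels * width / 8;
}

int main(void)
{
        long fifo_bytes = 256;
        long fs = frame_size(FMT_S16, 2);

        if (fs > 0)
                printf("fifo: %ld frames\n", fifo_bytes / fs);  /* 256 / 4 = 64 */

        fs = frame_size(FMT_UNKNOWN, 2);
        if (fs <= 0)
                printf("unknown format: leave fifo size in device units\n");
        return 0;
}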
index 6e5d0cb4e3d737ff43f0eb63e298de817552d224..47ccb8f44adb7ab5e0353a17cd0e68c2e1c363f0 100644 (file)
@@ -777,6 +777,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" },
        { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" },
        { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
+       { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
        {}
 };
index ea823e1100dac29ad087267adfb1ebcecb8ce3c6..98cd1908c0393856e8bfeec7622874079d60c3c1 100644 (file)
@@ -566,8 +566,8 @@ static void stac_init_power_map(struct hda_codec *codec)
                if (snd_hda_jack_tbl_get(codec, nid))
                        continue;
                if (def_conf == AC_JACK_PORT_COMPLEX &&
-                   !(spec->vref_mute_led_nid == nid ||
-                     is_jack_detectable(codec, nid))) {
+                   spec->vref_mute_led_nid != nid &&
+                   is_jack_detectable(codec, nid)) {
                        snd_hda_jack_detect_enable_callback(codec, nid,
                                                            STAC_PWR_EVENT,
                                                            jack_update_power);
@@ -4276,11 +4276,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
                        return err;
        }
 
-       stac_init_power_map(codec);
-
        return 0;
 }
 
+static int stac_build_controls(struct hda_codec *codec)
+{
+       int err = snd_hda_gen_build_controls(codec);
+
+       if (err < 0)
+               return err;
+       stac_init_power_map(codec);
+       return 0;
+}
 
 static int stac_init(struct hda_codec *codec)
 {
@@ -4392,7 +4399,7 @@ static int stac_suspend(struct hda_codec *codec)
 #endif /* CONFIG_PM */
 
 static const struct hda_codec_ops stac_patch_ops = {
-       .build_controls = snd_hda_gen_build_controls,
+       .build_controls = stac_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = stac_init,
        .free = stac_free,
index 98523209f7391852cb0e517fb373c6635e2ded99..69a85164357c58ab66ae761edfe0449e9f9f471b 100644 (file)
@@ -458,12 +458,12 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                if (params_width(params) == 16) {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
                                CS4265_DAC_CTL_DIF, (1 << 5));
-                       snd_soc_update_bits(codec, CS4265_ADC_CTL,
+                       snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                                CS4265_SPDIF_CTL2_DIF, (1 << 7));
                } else {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
                                CS4265_DAC_CTL_DIF, (3 << 5));
-                       snd_soc_update_bits(codec, CS4265_ADC_CTL,
+                       snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                                CS4265_SPDIF_CTL2_DIF, (1 << 7));
                }
                break;
@@ -472,7 +472,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                        CS4265_DAC_CTL_DIF, 0);
                snd_soc_update_bits(codec, CS4265_ADC_CTL,
                        CS4265_ADC_DIF, 0);
-               snd_soc_update_bits(codec, CS4265_ADC_CTL,
+               snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
                        CS4265_SPDIF_CTL2_DIF, (1 << 6));
 
                break;
index e4f6102efc1a4855e9b2d302c36c05655abb4c70..b86b426f159d136c07cbc4491c8241db0c5a2001 100644 (file)
@@ -51,7 +51,7 @@ static struct reg_default rt286_index_def[] = {
        { 0x04, 0xaf01 },
        { 0x08, 0x000d },
        { 0x09, 0xd810 },
-       { 0x0a, 0x0060 },
+       { 0x0a, 0x0120 },
        { 0x0b, 0x0000 },
        { 0x0d, 0x2800 },
        { 0x0f, 0x0000 },
@@ -60,7 +60,7 @@ static struct reg_default rt286_index_def[] = {
        { 0x33, 0x0208 },
        { 0x49, 0x0004 },
        { 0x4f, 0x50e9 },
-       { 0x50, 0x2c00 },
+       { 0x50, 0x2000 },
        { 0x63, 0x2902 },
        { 0x67, 0x1111 },
        { 0x68, 0x1016 },
@@ -104,7 +104,6 @@ static const struct reg_default rt286_reg[] = {
        { 0x02170700, 0x00000000 },
        { 0x02270100, 0x00000000 },
        { 0x02370100, 0x00000000 },
-       { 0x02040000, 0x00004002 },
        { 0x01870700, 0x00000020 },
        { 0x00830000, 0x000000c3 },
        { 0x00930000, 0x000000c3 },
@@ -192,7 +191,6 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value)
        /*handle index registers*/
        if (reg <= 0xff) {
                rt286_hw_write(client, RT286_COEF_INDEX, reg);
-               reg = RT286_PROC_COEF;
                for (i = 0; i < INDEX_CACHE_SIZE; i++) {
                        if (reg == rt286->index_cache[i].reg) {
                                rt286->index_cache[i].def = value;
@@ -200,6 +198,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value)
                        }
 
                }
+               reg = RT286_PROC_COEF;
        }
 
        data[0] = (reg >> 24) & 0xff;
index 484b3bbe8624be1cff63a1bd5154c8034ab80ef8..4021cd4357409e9ee95080ae9e61bb5752774272 100644 (file)
@@ -647,7 +647,7 @@ int ssm2602_probe(struct device *dev, enum ssm2602_type type,
                return -ENOMEM;
 
        dev_set_drvdata(dev, ssm2602);
-       ssm2602->type = SSM2602;
+       ssm2602->type = type;
        ssm2602->regmap = regmap;
 
        return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602,
index 87eb5776a39b627feb046a8be130d2c85da12c6a..de6ab06f58a54dbd32b871273a530db6e8d9fbed 100644 (file)
@@ -748,8 +748,9 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private,
-               unsigned int fmt)
+static int _fsl_ssi_set_dai_fmt(struct device *dev,
+                               struct fsl_ssi_private *ssi_private,
+                               unsigned int fmt)
 {
        struct regmap *regs = ssi_private->regs;
        u32 strcr = 0, stcr, srcr, scr, mask;
@@ -758,7 +759,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private,
        ssi_private->dai_fmt = fmt;
 
        if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
-               dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n");
+               dev_err(dev, "baudclk is missing which is necessary for master mode\n");
                return -EINVAL;
        }
 
@@ -913,7 +914,7 @@ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
 {
        struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
 
-       return _fsl_ssi_set_dai_fmt(ssi_private, fmt);
+       return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
 }
 
 /**
@@ -1387,7 +1388,8 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
 done:
        if (ssi_private->dai_fmt)
-               _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt);
+               _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
+                                    ssi_private->dai_fmt);
 
        return 0;
 
index 8d8e4b59049f2669fc503e05dd66ef52cb9e960c..fb9e05c9f47199c7bdcbd8413846899b074615ce 100644 (file)
@@ -165,13 +165,14 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
        struct rk_i2s_dev *i2s = to_info(cpu_dai);
        unsigned int mask = 0, val = 0;
 
-       mask = I2S_CKR_MSS_SLAVE;
+       mask = I2S_CKR_MSS_MASK;
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBS_CFS:
-               val = I2S_CKR_MSS_SLAVE;
+               /* Set source clock in Master mode */
+               val = I2S_CKR_MSS_MASTER;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
-               val = I2S_CKR_MSS_MASTER;
+               val = I2S_CKR_MSS_SLAVE;
                break;
        default:
                return -EINVAL;
@@ -361,6 +362,8 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
        case I2S_XFER:
        case I2S_CLR:
        case I2S_RXDR:
+       case I2S_FIFOLR:
+       case I2S_INTSR:
                return true;
        default:
                return false;
@@ -370,8 +373,8 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
 static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case I2S_FIFOLR:
        case I2S_INTSR:
+       case I2S_CLR:
                return true;
        default:
                return false;
@@ -381,8 +384,6 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
 static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case I2S_FIFOLR:
-               return true;
        default:
                return false;
        }
index 3092b58fede640cf369e69e9cb0f72e6e89e5593..cecfab3cc9488e14ee018cff6ec327522f203d7d 100644 (file)
@@ -102,13 +102,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
        fe->dpcm[stream].runtime = fe_substream->runtime;
 
        ret = dpcm_path_get(fe, stream, &list);
-       if (ret < 0) {
-               mutex_unlock(&fe->card->mutex);
+       if (ret < 0)
                goto fe_err;
-       } else if (ret == 0) {
+       else if (ret == 0)
                dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
                        fe->dai_link->name, stream ? "capture" : "playback");
-       }
 
        /* calculate valid and active FE <-> BE dpcms */
        dpcm_process_paths(fe, stream, &list, 1);
index 889f4e3d35dc53548f59f159666fbc668f7bd39b..d074aa91b023f8db8f7dace7f62db25a564517ce 100644 (file)
@@ -3203,7 +3203,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
        unsigned int val, mask;
        void *data;
 
-       if (!component->regmap)
+       if (!component->regmap || !params->num_regs)
                return -EINVAL;
 
        len = params->num_regs * component->val_bytes;
index f65fc0987cfb8cc9bdc45ce366a6c3bfa8641f55..b7a7c805d63fa8f977460c8757f78e2c02d1dca8 100644 (file)
@@ -100,15 +100,19 @@ static int control_put(struct snd_kcontrol *kcontrol,
        struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card);
        int pos = kcontrol->private_value;
        int v = ucontrol->value.integer.value[0];
-       unsigned char cmd = EP1_CMD_WRITE_IO;
+       unsigned char cmd;
 
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
-               cmd = EP1_CMD_DIMM_LEDS;
-
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER))
+       switch (cdev->chip.usb_id) {
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
                cmd = EP1_CMD_DIMM_LEDS;
+               break;
+       default:
+               cmd = EP1_CMD_WRITE_IO;
+               break;
+       }
 
        if (pos & CNT_INTVAL) {
                int i = pos & ~CNT_INTVAL;
index 5a0e95edf4dfed55288acc59fb9ea88f49f0dcd6..15fe792e1e9655bf35e016f0a4de1a1f72737c7f 100644 (file)
@@ -15,7 +15,7 @@
 #include <syslog.h>
 #include <unistd.h>
 #include <linux/usb/ch9.h>
-#include "../../uapi/usbip.h"
+#include <linux/usbip.h>
 
 #ifndef USBIDS_FILE
 #define USBIDS_FILE "/usr/share/hwdata/usb.ids"
index 01124ef3690a03e1c9ecdc464b954b19c6354440..416baedfc89fb249830a54850de7ecfde4b1cf60 100644 (file)
@@ -71,7 +71,7 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                  struct vgic_lr lr_desc)
 {
        if (!(lr_desc.state & LR_STATE_MASK))
-               set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+               __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
index 33712fb26eb11caa0f14bbf3b21a9e459c18252a..95519bc959edcaaefe27d1fd9e1d7a51aa52b6e2 100644 (file)
@@ -110,7 +110,7 @@ static bool largepages_enabled = true;
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return PageReserved(pfn_to_page(pfn));
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1725,7 +1725,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
-               task = get_pid_task(target->pid, PIDTYPE_PID);
+               task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return ret;